Columns: query (string, lengths 9 to 9.05k), document (string, lengths 10 to 222k), negatives (list, 19 to 20 items), metadata (dict)
Returns an XML representation of the Orientation instance.
def to_xml(self, doc):
    print('deprecated as we are moving to hdf5 format')
    orientation = doc.createElement('Orientation')
    orientation_phi1 = doc.createElement('phi1')
    orientation_phi1_text = doc.createTextNode('%f' % self.phi1())
    orientation_phi1.appendChild(orientation_phi1_text)
    orientation.appendChild(orientation_phi1)
    orientation_Phi = doc.createElement('Phi')
    orientation_Phi_text = doc.createTextNode('%f' % self.Phi())
    orientation_Phi.appendChild(orientation_Phi_text)
    orientation.appendChild(orientation_Phi)
    orientation_phi2 = doc.createElement('phi2')
    orientation_phi2_text = doc.createTextNode('%f' % self.phi2())
    orientation_phi2.appendChild(orientation_phi2_text)
    orientation.appendChild(orientation_phi2)
    return orientation
[ "def getOrientation(self):\n return self.getTag(\"Orientation#\", 1)", "def orientation(self):\r\n tag=self.readinfo('Image Orientation Patient')\r\n \r\n if tag==None:\r\n name=None\r\n elif tag==[-0,1,0,-0,-0,-1]:\r\n name=1 #Sagittal\r\n elif tag==[-1,-0,0,-0,-1,0]:\r\n name=2 #Axial\r\n elif tag==[1,0,0,0,0,-1]:\r\n name=3 #Coronal\r\n else:\r\n name=4 #Oblique\r\n self.orient=name\r\n return", "def get_orientation(self):\n pose = self.get_pose()\n orientation = np.array(pose.r)\n return orientation", "def xml(self):\n return oxml_tostring(self, encoding='UTF-8', standalone=True)", "def to_xml(self):\n\n base_elem = ET.Element(\"symmetry\")\n x_elem = ET.SubElement(base_elem, \"x\")\n x_elem.text = self._symmetry_type_to_text[self.x_symmetry]\n y_elem = ET.SubElement(base_elem, \"y\")\n y_elem.text = self._symmetry_type_to_text[self.y_symmetry]\n z_elem = ET.SubElement(base_elem, \"z\")\n z_elem.text = self._symmetry_type_to_text[self.z_symmetry]\n\n return base_elem", "def landscape(self):\n lscape = self._lscape_ref()\n return lscape", "def _layout_orientation(self):\n #Orientation\n orientation_txt = wx.StaticText(self, -1, 'Orientation:')\n x_orientation_txt = wx.StaticText(self, -1, 'x = ')\n self.x_orientation_tcl = wx.TextCtrl(self, -1,\n size=(_BOX_WIDTH, 20), style=0)\n y_orientation_txt = wx.StaticText(self, -1, 'y = ')\n self.y_orientation_tcl = wx.TextCtrl(self, -1,\n size=(_BOX_WIDTH, 20), style=0)\n z_orientation_txt = wx.StaticText(self, -1, 'z = ')\n self.z_orientation_tcl = wx.TextCtrl(self, -1,\n size=(_BOX_WIDTH, 20), style=0)\n orientation_unit_txt = wx.StaticText(self, -1, 'Unit: ')\n self.orientation_unit_tcl = wx.TextCtrl(self, -1,\n size=(_BOX_WIDTH, 20), style=0)\n self.orientation_sizer.AddMany([(orientation_txt, 0,\n wx.LEFT | wx.RIGHT, 10),\n (x_orientation_txt, 0, wx.LEFT, 7),\n (self.x_orientation_tcl, 0, wx.RIGHT, 10),\n (y_orientation_txt, 0, wx.EXPAND),\n (self.y_orientation_tcl, 0, wx.RIGHT, 10),\n (z_orientation_txt, 0, wx.EXPAND),\n (self.z_orientation_tcl, 0, wx.RIGHT, 10),\n (orientation_unit_txt, 0, wx.EXPAND),\n (self.orientation_unit_tcl, 0, wx.RIGHT, 10)])", "def dumpOrient(self):\r\n s = str(self.orient.flatten().tolist()).replace(\"[\", \"\").replace(\"]\", \"\") #make a list of the form \"f, f, ....\"\r\n return s", "def _get_frame_orientation_attrs(self):\n orientation_node = self.soup.find(id=\"COrientation\")\n attrs = self._get_attributes_from_node(orientation_node)\n return attrs", "def orientations(self) -> Sequence[str]:\n return pulumi.get(self, \"orientations\")", "def xml(self) -> ET.Element:\n return self.device_info.xml", "def generate_xml(self):\n assert self.xml_root != None, 'The self.xml_root variable must be set in your inheriting class'\n output = StringIO.StringIO()\n xd = XMLDumper(output, XML_DUMP_PRETTY | XML_STRICT_HDR)\n xd.XMLDumpKeyValue(self.xml_root, self.data.to_dict())\n output.seek(0)\n return output", "def get_orientation(self, rotation):\r\n return self.orientations[rotation % self.max_rotations]", "def toXML(self, level=0, indent='\\t'):\n\t\txml = StringIO()\n\t\tindentation = u''.join(indent for x in range(0,level))\n\t\t\n\t\txml.write(indentation)\n\t\txml.write(u'<partition name=\"%s\" index=\"%s\" id=\"%s\"/>' % \n\t\t\t\t(self._name, self._index, self._id))\n\t\t\n\t\tresult = xml.getvalue()\n\t\txml.close()\n\t\treturn result", "def get_orientation(self):\n if -4.9 < accelerometer.acceleration[0] < 4.9:\n self.orientation = 0\n else:\n self.orientation = 1", "def to_landscape(self) -> None:\n if 
self.is_portrait:\n self.width, self.height = self.height, self.width", "def to_xml_element(self):\n # Reset xml element tree\n self._plots_file.clear()\n\n self._create_plot_subelements()\n\n # Clean the indentation in the file to be user-readable\n clean_indentation(self._plots_file)\n # TODO: Remove when support is Python 3.8+\n reorder_attributes(self._plots_file)\n\n return self._plots_file", "def to_portrait(self) -> None:\n if self.is_landscape:\n self.width, self.height = self.height, self.width", "def xml(self):\n return self._domain.xml", "def set_orientation(self, value):\n self._selenium_web_driver().orientation = value.upper()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the Rodrigues vector from the orientation matrix.
def OrientationMatrix2Rodrigues(g):
    t = g.trace() + 1
    if np.abs(t) < np.finfo(g.dtype).eps:
        print('warning, returning [0., 0., 0.], consider using axis, angle representation instead')
        return np.zeros(3)
    else:
        r1 = (g[1, 2] - g[2, 1]) / t
        r2 = (g[2, 0] - g[0, 2]) / t
        r3 = (g[0, 1] - g[1, 0]) / t
        return np.array([r1, r2, r3])
[ "def rod_to_u(rodriguez_vector):\n r = n.asarray(rodriguez_vector, float)\n g = n.zeros((3, 3))\n r2 = n.dot(r , r)\n\n for i in range(3):\n for j in range(3):\n if i == j:\n fac = 1\n else:\n fac = 0\n term = 0\n for k in range(3):\n if [i, j, k] == [0, 1, 2] or \\\n [i, j, k] == [1, 2, 0] or \\\n [i, j, k] == [2, 0, 1]:\n sign = 1\n elif [i, j, k] == [2, 1, 0] or \\\n [i, j, k] == [0, 2, 1] or \\\n [i, j, k] == [1, 0, 2]:\n sign = -1\n else:\n sign = 0\n term = term + 2*sign*r[k]\n g[i, j] = 1/(1+r2) * ((1-r2)*fac + 2*r[i]*r[j] - term)\n return n.transpose(g)", "def _orientation_vectors(self):\n\n agent_orientations = np.empty((self.num_agents,2),dtype=np.float)\n\n for a_idx, a in enumerate(self._ctrl.agents):\n theta = a.th*2*math.pi/self.num_head_turns\n agent_orientations[a_idx] = [-1*math.sin(theta),math.cos(theta)]\n\n return agent_orientations", "def rotation_mat2vec(R):\n TINY = 1e-15\n\n # Compute the trace of the rotation matrix plus one\n aux = np.sqrt(R.trace()+1.0)\n \n if aux > TINY: \n\n # Compute the associated quaternion. Notice: trace(R) + 1 = 4w^2\n quat = np.array([R[2,1]-R[1,2], R[0,2]-R[2,0], R[1,0]-R[0,1], .5*aux])\n quat[0:3] *= .5/aux\n \n # Compute the angle between 0 and PI (ensure that the last\n # quaternion element is in the range (-1,1))\n theta = 2*np.arccos(max(-1., min(quat[3], 1.)))\n\n # Normalize the rotation axis\n norma = max(np.sqrt((quat[0:3]**2).sum()), TINY)\n return (theta/norma)*quat[0:3]\n \n else: \n \n # Singularity case: theta == PI. In this case, the above\n # identification is not possible since w=0. \n x2 = .25*(1 + R[0][0]-R[1][1]-R[2][2])\n if x2 > TINY: \n xy = .5*R[1][0]\n xz = .5*R[2][0]\n n = np.array([x2,xy,xz])\n else: \n y2 = .25*(1 + R[1][1]-R[0][0]-R[2][2])\n if y2 > TINY: \n xy = .5*R[1][0]\n yz = .5*R[2][1]\n n = np.array([xy,y2,yz])\n else: \n z2 = .25*(1 + R[2][2]-R[0][0]-R[1][1])\n if z2 > TINY: \n xz = .5*R[2][0]\n yz = .5*R[2][1]\n n = np.array([xz,yz,z2])\n return np.pi*n/np.sqrt((n**2).sum())", "def rvec2rpy_ros2(rvec):\n\n m, _ = cv2.Rodrigues(rvec)\n\n # // Assuming the angles are in radians.\n if m[1, 0] > 0.998: # // singularity at north pole\n yaw = math.atan2(m[0, 2], m[2, 2])\n roll = math.PI / 2\n pitch = 0\n elif m[1, 0] < -0.998: # // singularity at south pole\n yaw = math.atan2(m[0, 2], m[2, 2])\n roll = -math.PI / 2\n pitch = 0\n\n else:\n roll = -math.atan2(-m[2, 0], m[0, 0]) + math.pi\n pitch = -math.atan2(m[2, 2], m[1, 2]) + math.pi / 2\n yaw = -math.asin(m[1, 0])\n\n return roll, pitch, yaw", "def rodrigues(z,r,j):\n z /= np.linalg.norm(z)\n z0 = np.zeros(3)\n if np.any(z == 0):\n z0[z == 0] = 1\n else:\n z0[0] = 2/z[0]\n z0[1] = -1/z[1]\n z0[2] = -1/z[2]\n z0 /= np.linalg.norm(z0)\n z0 *= r\n z0 = z + z0\n z0 /= np.linalg.norm(z0)\n B = np.cross(z,z0)\n C = np.dot(z,z0)*z\n directions = np.zeros((j,3))\n for i in range(j):\n x = 2*np.pi*(i+1)/j\n directions[i,:] = z0*np.cos(x)+B*np.sin(x)+C*(1-np.cos(x))\n return directions", "def radial_to_vector(magnitude, direction, orientation='to'):\n assert orientation in ['from', 'to']\n v = np.cos(direction) * magnitude\n u = np.sin(direction) * magnitude\n if orientation == \"from\":\n v = -v\n u = -u\n return u, v", "def compute_angles(self):\n edges = self.edges().reshape(-1, 3, 2)\n vecs = np.diff(self.vertices[edges], axis=2)[:, :, 0]\n vecs = util.normalize(vecs)\n angles = np.arccos(-util.dot(vecs[:, [1, 2, 0]], vecs[:, [2, 0, 1]]))\n assert np.allclose(angles.sum(axis=1), np.pi, rtol=1e-3)\n return angles", "def friedrichs(U,V):\n angles = 
subspace_angles(U,V)\n return np.sort(angles)[0]", "def get_rpy(self):\n quat = self._state.pose.orientation\n # Edited python3 code\n rpy = euler_from_quaternion([quat.x, quat.y, quat.z, quat.w])\n # Initial python2 code\n # rpy = transformations.euler_from_quaternion([quat.x, quat.y, quat.z, quat.w])\n return rpy", "def get_orientation(self):\n pose = self.get_pose()\n orientation = np.array(pose.r)\n return orientation", "def directionVector(self):\n return array([math.cos(self.r), math.sin(self.r)])", "def rotate_right(self):\n return AgentOrientation((self.value + 1) % self.size())", "def rotation_matrices(self):\n matrices = []\n for r in self.rotations:\n matrices.append(cv2.Rodrigues(r))\n return matrices", "def vector_orientation (x, y):\n\tif x <= 0.3826 and x >= -0.3826 and y <= 1 and y >= 0.9238:\n\t\treturn \"North\"\n\telif x < 0.8660 and x > 0.3826 and y < 0.9238 and y > 0.5000:\n\t\treturn \"Northeast\"\n\telif x <= 1 and x >= 0.8660 and y <= 0.5000 and y >= -0.3583:\n\t\treturn \"East\"\n\telif x < 0.9335 and x > 0.3090 and y < -0.3583 and y > -0.9510:\n\t\treturn \"Southeast\"\n\telif x <= 0.3090 and x >= -0.3090 and y <= -0.9510 and y >= -1:\n\t\treturn \"South\"\n\telif x < -0.3090 and x > -0.9335 and y < -0.3583 and y > -0.9510:\n\t\treturn \"Southwest\"\n\telif x <= -0.8660 and x >= -1 and y <= 0.5000 and y >= -0.3583:\n\t\treturn \"West\"\n\telif x < -0.3826 and x > -0.8660 and y < 0.9238 and y > 0.5000:\n\t\treturn \"Northwest\"\n\telse:\n\t\treturn \"No orientation\"", "def direction_vector(self):\n return np.array([np.cos(self.angle), np.sin(self.angle)])", "def pose_up_vector_euler(euler_frame):\n ref_offset = np.array([1, 0, 0, 1])\n rot_angles = euler_frame[3:6]\n rot_angles_rad = np.deg2rad(rot_angles)\n rotmat = euler_matrix(rot_angles_rad[0],\n rot_angles_rad[1],\n rot_angles_rad[2],\n 'rxyz')\n rotated_point = np.dot(rotmat, ref_offset)\n up_vec = np.array([rotated_point[0], rotated_point[1]])\n up_vec /= np.linalg.norm(up_vec)\n return up_vec", "def polar2vect(delta, phi):\n import numpy as np\n x = np.cos(phi)\n y = np.sin(phi)\n z = np.sin(delta)\n vector = np.array([x,y,z])\n vector = vector / np.linalg.norm(vector)\n return vector", "def rotation_matrix_decompose(r):\n return numpy.array( (math.atan2(r[2][1],r[2][2]),\\\n math.atan2(-r[2][0],math.sqrt(r[2][1]*r[2][1]+r[2][2]*r[2][2])),\\\n math.atan2(r[1][0],r[0][0])))", "def rms(self):\n return _vnl_vectorPython.vnl_vectorUC_rms(self)", "def odeStateToOrientationVecs(odeState):\n r_w2b_w = odeState[0:3]\n # In the VPython frame, rotation around x-axis\n rot_w2v = np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]])\n r_w2b_v = rot_w2v.dot(r_w2b_w)\n euler_w2f = odeState[6:9]\n yaw_f2b = odeState[14]\n # yaw_b2p = odeState[15]\n euler_w2b = addYaw(euler_w2f, yaw_f2b)\n euler_w2b_w_xyz = eulerExtXYZfromEulerShrimp(euler_w2b)\n # Reorder euler angles around the correct axes\n euler_w2b_v_xyz = rot_w2v.dot(euler_w2b_w_xyz)\n return vpVecFromArr(r_w2b_v), vpVecFromArr(euler_w2b_v_xyz)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the orientation matrix from the Rodrigues vector.
def Rodrigues2OrientationMatrix(rod):
    r = np.linalg.norm(rod)
    I = np.diagflat(np.ones(3))
    if r < np.finfo(r.dtype).eps:
        return I
    else:
        theta = 2 * np.arctan(r)
        n = rod / r
        omega = np.array([[0.0, n[2], -n[1]],
                          [-n[2], 0.0, n[0]],
                          [n[1], -n[0], 0.0]])
        return I + np.sin(theta) * omega + (1 - np.cos(theta)) * omega.dot(omega)
[ "def _rotation_matrix_uniaxial(theta,phi, R):\n costheta = cos(theta)\n sintheta = sin(theta)\n cosphi = cos(phi)\n sinphi = sin(phi)\n \n R[0,0] = costheta * cosphi\n R[0,1] = - sinphi \n R[0,2] = cosphi * sintheta\n R[1,0] = costheta * sinphi \n R[1,1] = cosphi\n R[1,2] = sintheta * sinphi\n R[2,0] = -sintheta\n R[2,1] = 0.\n R[2,2] = costheta", "def rod_to_u(rodriguez_vector):\n r = n.asarray(rodriguez_vector, float)\n g = n.zeros((3, 3))\n r2 = n.dot(r , r)\n\n for i in range(3):\n for j in range(3):\n if i == j:\n fac = 1\n else:\n fac = 0\n term = 0\n for k in range(3):\n if [i, j, k] == [0, 1, 2] or \\\n [i, j, k] == [1, 2, 0] or \\\n [i, j, k] == [2, 0, 1]:\n sign = 1\n elif [i, j, k] == [2, 1, 0] or \\\n [i, j, k] == [0, 2, 1] or \\\n [i, j, k] == [1, 0, 2]:\n sign = -1\n else:\n sign = 0\n term = term + 2*sign*r[k]\n g[i, j] = 1/(1+r2) * ((1-r2)*fac + 2*r[i]*r[j] - term)\n return n.transpose(g)", "def rotations_from_vector(v):\n roll = 0\n pitch = math.atan2(v.z, math.sqrt(v.x ** 2 + v.y ** 2))\n yaw = math.atan2(v.y, v.x)\n return Rotations(roll, pitch, yaw)", "def rotation_matrix_decompose(r):\n return numpy.array( (math.atan2(r[2][1],r[2][2]),\\\n math.atan2(-r[2][0],math.sqrt(r[2][1]*r[2][1]+r[2][2]*r[2][2])),\\\n math.atan2(r[1][0],r[0][0])))", "def generate_random_rotation_matrix() -> np.ndarray:\n u = generate_random_unit_vector()\n v = generate_random_unit_vector()\n while np.abs(np.dot(u, v)) >= 0.99:\n v = generate_random_unit_vector()\n\n vp = v - (np.dot(u, v) * u)\n vp /= np.linalg.norm(vp)\n w = np.cross(u, vp)\n R = np.column_stack((u, vp, w))\n return R", "def rotation_matrix(u):\n c = 0.0\n s = 1.0\n # u has to be a versor in the l2-norm\n u = u / np.linalg.norm(u)\n x = u[0]\n y = u[1]\n z = u[2]\n C = 1. - c\n R = np.array([\n [(x * x * C + c), (x * y * C - z * s), (x * z * C + y * s)],\n [(y * x * C + z * s), (y * y * C + c), (y * z * C - x * s)],\n [(z * x * C - y * s), (z * y * C + x * s), (z * z * C + c)]])\n return R", "def _orientation_vectors(self):\n\n agent_orientations = np.empty((self.num_agents,2),dtype=np.float)\n\n for a_idx, a in enumerate(self._ctrl.agents):\n theta = a.th*2*math.pi/self.num_head_turns\n agent_orientations[a_idx] = [-1*math.sin(theta),math.cos(theta)]\n\n return agent_orientations", "def rotation_mat2vec(R):\n TINY = 1e-15\n\n # Compute the trace of the rotation matrix plus one\n aux = np.sqrt(R.trace()+1.0)\n \n if aux > TINY: \n\n # Compute the associated quaternion. Notice: trace(R) + 1 = 4w^2\n quat = np.array([R[2,1]-R[1,2], R[0,2]-R[2,0], R[1,0]-R[0,1], .5*aux])\n quat[0:3] *= .5/aux\n \n # Compute the angle between 0 and PI (ensure that the last\n # quaternion element is in the range (-1,1))\n theta = 2*np.arccos(max(-1., min(quat[3], 1.)))\n\n # Normalize the rotation axis\n norma = max(np.sqrt((quat[0:3]**2).sum()), TINY)\n return (theta/norma)*quat[0:3]\n \n else: \n \n # Singularity case: theta == PI. In this case, the above\n # identification is not possible since w=0. 
\n x2 = .25*(1 + R[0][0]-R[1][1]-R[2][2])\n if x2 > TINY: \n xy = .5*R[1][0]\n xz = .5*R[2][0]\n n = np.array([x2,xy,xz])\n else: \n y2 = .25*(1 + R[1][1]-R[0][0]-R[2][2])\n if y2 > TINY: \n xy = .5*R[1][0]\n yz = .5*R[2][1]\n n = np.array([xy,y2,yz])\n else: \n z2 = .25*(1 + R[2][2]-R[0][0]-R[1][1])\n if z2 > TINY: \n xz = .5*R[2][0]\n yz = .5*R[2][1]\n n = np.array([xz,yz,z2])\n return np.pi*n/np.sqrt((n**2).sum())", "def vector_orientation (x, y):\n\tif x <= 0.3826 and x >= -0.3826 and y <= 1 and y >= 0.9238:\n\t\treturn \"North\"\n\telif x < 0.8660 and x > 0.3826 and y < 0.9238 and y > 0.5000:\n\t\treturn \"Northeast\"\n\telif x <= 1 and x >= 0.8660 and y <= 0.5000 and y >= -0.3583:\n\t\treturn \"East\"\n\telif x < 0.9335 and x > 0.3090 and y < -0.3583 and y > -0.9510:\n\t\treturn \"Southeast\"\n\telif x <= 0.3090 and x >= -0.3090 and y <= -0.9510 and y >= -1:\n\t\treturn \"South\"\n\telif x < -0.3090 and x > -0.9335 and y < -0.3583 and y > -0.9510:\n\t\treturn \"Southwest\"\n\telif x <= -0.8660 and x >= -1 and y <= 0.5000 and y >= -0.3583:\n\t\treturn \"West\"\n\telif x < -0.3826 and x > -0.8660 and y < 0.9238 and y > 0.5000:\n\t\treturn \"Northwest\"\n\telse:\n\t\treturn \"No orientation\"", "def rotor_to_rotation_matrix(R):\n q = rotor_to_quaternion(R)\n return quaternion_to_matrix(q)", "def orientation_matrix(euler_angle):\n\n # Convert from degrees to radians\n phi1 = np.deg2rad(euler_angle[0])\n Phi = np.deg2rad(euler_angle[1])\n phi2 = np.deg2rad(euler_angle[2])\n\n # Assemble orientation matrix\n M = np.zeros([3, 3])\n M[0,0] = cos(phi1)*cos(phi2) - sin(phi1)*sin(phi2)*cos(Phi)\n M[0,1] = sin(phi1)*cos(phi2) + cos(phi1)*sin(phi2)*cos(Phi)\n M[0,2] = sin(phi2)*sin(Phi)\n M[1,0] = -cos(phi1)*sin(phi2) - sin(phi1)*cos(phi2)*cos(Phi)\n M[1,1] = -sin(phi1)*sin(phi2) + cos(phi1)*cos(phi2)*cos(Phi)\n M[1,2] = cos(phi2)*sin(Phi)\n M[2,0] = sin(phi1)*sin(Phi)\n M[2,1] = -cos(phi1)*sin(Phi)\n M[2,2] = cos(Phi)\n return M", "def orientation(p, q, r):\n # use the slope to get orientation\n val = (q[1] - p[1]) * (r[0] - q[0]) - (q[0] - p[0]) * (r[1] - q[1])\n\n if val == 0: # colinear\n return 0\n\n return 1 if val > 0 else 2 # clock or counterclokwise", "def get_rotationMatrix(self):\n rot_mat = quat2mat(self.quat)\n try:\n [U, s, V] = np.linalg.svd(rot_mat)\n return np.dot(U, V)\n except:\n return np.eye(3)", "def _rotate(self, theta, v):\n c, s = np.cos(theta), np.sin(theta)\n R = np.array(((c, -s), (s, c)))\n rot = np.dot(v, R)\n return rot", "def rotation_matrix(self):\n return np.array([self.axis_u, self.axis_v, self.axis_w])", "def rodrigues(z,r,j):\n z /= np.linalg.norm(z)\n z0 = np.zeros(3)\n if np.any(z == 0):\n z0[z == 0] = 1\n else:\n z0[0] = 2/z[0]\n z0[1] = -1/z[1]\n z0[2] = -1/z[2]\n z0 /= np.linalg.norm(z0)\n z0 *= r\n z0 = z + z0\n z0 /= np.linalg.norm(z0)\n B = np.cross(z,z0)\n C = np.dot(z,z0)*z\n directions = np.zeros((j,3))\n for i in range(j):\n x = 2*np.pi*(i+1)/j\n directions[i,:] = z0*np.cos(x)+B*np.sin(x)+C*(1-np.cos(x))\n return directions", "def optimal_rotation_matrix(source, target, allow_mirror=False):\n correlation = np.dot(target.points.T, source.points)\n U, D, Vt = np.linalg.svd(correlation)\n R = np.dot(U, Vt)\n\n if not allow_mirror:\n # d = sgn(det(V * Ut))\n d = np.sign(np.linalg.det(R))\n if d < 0:\n E = np.eye(U.shape[0])\n E[-1, -1] = d\n # R = U * E * Vt, E = [[1, 0, 0], [0, 1, 0], [0, 0, d]] for 2D\n R = np.dot(U, np.dot(E, Vt))\n return R", "def euler_angles_to_rotation_matrix(theta) -> np.ndarray:\n\n print(\"theta\", type(theta))\n\n r_x = 
np.array([[1, 0, 0],\n [0, cos(theta[0]), -sin(theta[0])],\n [0, sin(theta[0]), cos(theta[0])]\n ])\n\n r_y = np.array([[cos(theta[1]), 0, sin(theta[1])],\n [0, 1, 0],\n [-sin(theta[1]), 0, cos(theta[1])]\n ])\n\n r_z = np.array([[cos(theta[2]), -sin(theta[2]), 0],\n [sin(theta[2]), cos(theta[2]), 0],\n [0, 0, 1]\n ])\n\n rmat = np.dot(r_z, np.dot(r_y, r_x))\n return rmat", "def arc_to_matrix(vector0, vector1):\n \n vector0 = _setDimension(vector0,2)\n vector1 = _setDimension(vector1,2)\n \n vector0, vector1 = _matchDepth(vector0, vector1)\n \n return _quaternionToMatrix(_vectorArcToQuaternion(vector0, vector1))", "def rvec2rpy_ros2(rvec):\n\n m, _ = cv2.Rodrigues(rvec)\n\n # // Assuming the angles are in radians.\n if m[1, 0] > 0.998: # // singularity at north pole\n yaw = math.atan2(m[0, 2], m[2, 2])\n roll = math.PI / 2\n pitch = 0\n elif m[1, 0] < -0.998: # // singularity at south pole\n yaw = math.atan2(m[0, 2], m[2, 2])\n roll = -math.PI / 2\n pitch = 0\n\n else:\n roll = -math.atan2(-m[2, 0], m[0, 0]) + math.pi\n pitch = -math.atan2(m[2, 2], m[1, 2]) + math.pi / 2\n yaw = -math.asin(m[1, 0])\n\n return roll, pitch, yaw" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the axis/angle representation from the Rodrigues vector.
def Rodrigues2Axis(rod):
    r = np.linalg.norm(rod)
    axis = rod / r
    angle = 2 * np.arctan(r)
    return axis, angle
[ "def angle_vector(self):\n from math import atan2, pi\n return (atan2(self.y, self.x)) / pi * 180", "def get_angle_and_axis(self):\n # special case: no rotation\n if self == Rotation():\n angle = 0.\n axis = Vector((1., 0., 0.))\n return angle, axis\n\n # construct unit vector along rotation axis\n R = self.get_matrix()\n x = R[2, 1] - R[1, 2]\n y = R[0, 2] - R[2, 0]\n z = R[1, 0] - R[0, 1]\n axis = Vector((x, y, z)).unit_vector()\n\n # helper Vector\n w = Vector((z, x, y))\n\n # vector perpendicular to u\n v = axis.cross(w)\n\n # ========== magnitude =========================== #\n cos = 0.5 * (np.trace(R) - 1)\n acos = np.arccos(cos)\n\n # use sine to resolve ambiguity of acos\n # The following line of code is taken from\n # http://vhm.mathematik.uni-stuttgart.de/Vorlesungen/\n # Lineare_Algebra/Folien_Drehachse_und_Drehwinkel.pdf\n # (join the above two lines to get the proper URL)\n sin = v.dot(self.dot(axis))\n if sin < 0:\n angle = - acos\n else:\n angle = acos\n # ================================================ #\n\n return angle, axis", "def normal_angle_to_vector_xz(self, vector):\n vector = vector.reshape([3,1])\n innner = (vector[0,0]*self.a + vector[2,0]*self.c)\n scaling = np.sqrt(vector[0,0]**2 + vector[2,0]**2) * np.sqrt(self.a**2 + self.c**2)\n angle = np.arccos(innner / scaling)\n return angle", "def angle(self):\n return np.degrees(np.arctan2(self.u_vector[1], self.u_vector[0]))", "def angle_rad(u, v):\n a = u / np.linalg.norm(u, 2)\n b = v / np.linalg.norm(v, 2)\n return np.arctan2(\n a[0] * b[1] - a[1] * b[0],\n a[0] * b[0] + a[1] * b[1],\n )", "def _versor_to_axis_angle(valarr):\n assert len(valarr) % 3 == 0\n for vecstart in range(0, len(valarr), 3):\n vec = valarr[vecstart:vecstart + 3]\n comp_s = _np.sqrt(1.- (_np.sum(_np.square(vec))))\n recrot = 2. 
* _np.arctan2(_np.linalg.norm(vec), comp_s)\n recvec = vec / _np.sin(recrot / 2.)\n vec[:] = recvec / _np.linalg.norm(recvec) * recrot\n return valarr", "def angle(vector0, vector1):\n \n vector0 = _setDimension(vector0,2)\n vector1 = _setDimension(vector1,2)\n \n return _vectorArc(vector0, vector1)", "def directionVector(self):\n return array([math.cos(self.r), math.sin(self.r)])", "def angle(v):\n cos_theta = normalized(v)[0]\n theta = math.acos(cos_theta)\n if v[1] > 0:\n theta = -theta\n return rads_to_degs(theta)", "def angle_ref(self,*args,**kwds):\n R = self\n lonlat = kwds.get('lonlat',False)\n inv = kwds.get('inv',False)\n if len(args) == 1:\n arg=args[0]\n if not hasattr(arg,'__len__') or len(arg) < 2 or len(arg) > 3:\n raise TypeError('Argument must be a sequence of 2 or 3 '\n 'elements')\n if len(arg) == 2:\n v = dir2vec(arg[0],arg[1],lonlat=lonlat)\n else:\n v = arg\n elif len(args) == 2:\n v = dir2vec(args[0],args[1],lonlat=lonlat)\n elif len(args) == 3:\n v = args\n else:\n raise TypeError('Either 1, 2 or 3 arguments accepted')\n vp = R(v,inv=inv)\n north_pole = R([0.,0.,1.],inv=inv)\n sinalpha = north_pole[0]*vp[1]-north_pole[1]*vp[0]\n cosalpha = north_pole[2] - vp[2]*npy.dot(north_pole,vp)\n return npy.arctan2(sinalpha,cosalpha)", "def view_angle(self):\n view_i = -self.Ri[2,:].T\n view_j = -self.Rj[2,:].T\n return np.arccos(np.dot(view_i.T, view_j))", "def direction_vector(self):\n return np.array([np.cos(self.angle), np.sin(self.angle)])", "def compute_angles(self):\n edges = self.edges().reshape(-1, 3, 2)\n vecs = np.diff(self.vertices[edges], axis=2)[:, :, 0]\n vecs = util.normalize(vecs)\n angles = np.arccos(-util.dot(vecs[:, [1, 2, 0]], vecs[:, [2, 0, 1]]))\n assert np.allclose(angles.sum(axis=1), np.pi, rtol=1e-3)\n return angles", "def angle_deg(u, v):\n return angle_rad(u, v) * (180 / np.pi)", "def angle_to(self, vector):\n angle = vector.heading - self.heading\n if angle < 0:\n angle += 360\n return angle", "def rod_to_u(rodriguez_vector):\n r = n.asarray(rodriguez_vector, float)\n g = n.zeros((3, 3))\n r2 = n.dot(r , r)\n\n for i in range(3):\n for j in range(3):\n if i == j:\n fac = 1\n else:\n fac = 0\n term = 0\n for k in range(3):\n if [i, j, k] == [0, 1, 2] or \\\n [i, j, k] == [1, 2, 0] or \\\n [i, j, k] == [2, 0, 1]:\n sign = 1\n elif [i, j, k] == [2, 1, 0] or \\\n [i, j, k] == [0, 2, 1] or \\\n [i, j, k] == [1, 0, 2]:\n sign = -1\n else:\n sign = 0\n term = term + 2*sign*r[k]\n g[i, j] = 1/(1+r2) * ((1-r2)*fac + 2*r[i]*r[j] - term)\n return n.transpose(g)", "def axis_angle(u, theta):\n u = np.asarray(u)\n u = u/np.linalg.norm(u)\n ct = np.cos(theta)\n st = np.sin(theta)\n ux,uy,uz = u\n\n rot_matrix = np.array([ [ct + ux**2*(1-ct), ux*uy*(1-ct) - uz*st, ux*uz*(1-ct) + uy*st ],\n [ux*uy*(1-ct) + uz*st, ct + uy**2*(1-ct) , uy*uz*(1-ct) - ux*st],\n [uz*ux*(1-ct) - uy*st, uz*uy*(1-ct) + ux*st, ct + uz**2*(1-ct) ] ])\n return rot_matrix", "def vrrotvec(a, b):\r\n a = normalize(a)\r\n b = normalize(b)\r\n ax = normalize(np.cross(a, b))\r\n\r\n angle = np.arccos(np.minimum(np.dot(a, b), [1]))\r\n if not np.any(ax):\r\n absa = np.abs(a)\r\n mind = np.argmin(absa)\r\n c = np.zeros((1, 3))\r\n c[mind] = 0\r\n ax = normalize(np.cross(a, c))\r\n r = np.concatenate((ax, angle))\r\n return r", "def axang_to_rotm(r, with_magnitude=False):\n\n if with_magnitude:\n theta = np.linalg.norm(r) + 1e-15\n r = r / theta \n r = np.append(r, theta)\n\n kx, ky, kz, theta = r\n\n ctheta = math.cos(theta)\n stheta = math.sin(theta)\n vtheta = 1 - math.cos(theta)\n\n R = 
np.float32([\n [kx*kx*vtheta + ctheta, kx*ky*vtheta - kz*stheta, kx*kz*vtheta + ky*stheta],\n [kx*ky*vtheta + kz*stheta, ky*ky*vtheta + ctheta, ky*kz*vtheta - kx*stheta],\n [kx*kz*vtheta - ky*stheta, ky*kz*vtheta + kx*stheta, kz*kz*vtheta + ctheta ]\n ])\n\n return R", "def rotations_from_vector(v):\n roll = 0\n pitch = math.atan2(v.z, math.sqrt(v.x ** 2 + v.y ** 2))\n yaw = math.atan2(v.y, v.x)\n return Rotations(roll, pitch, yaw)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the (passive) orientation matrix associated with the rotation defined by the given (axis, angle) pair.
def Axis2OrientationMatrix(axis, angle):
    omega = np.radians(angle)
    c = np.cos(omega)
    s = np.sin(omega)
    g = np.array([[c + (1 - c) * axis[0] ** 2,
                   (1 - c) * axis[0] * axis[1] + s * axis[2],
                   (1 - c) * axis[0] * axis[2] - s * axis[1]],
                  [(1 - c) * axis[0] * axis[1] - s * axis[2],
                   c + (1 - c) * axis[1] ** 2,
                   (1 - c) * axis[1] * axis[2] + s * axis[0]],
                  [(1 - c) * axis[0] * axis[2] + s * axis[1],
                   (1 - c) * axis[1] * axis[2] - s * axis[0],
                   c + (1 - c) * axis[2] ** 2]])
    return g
[ "def rotate(angle, axis):\n a = normalize(axis)\n sin_t = math.sin(math.radians(angle))\n cos_t = math.cos(math.radians(angle))\n mat = Matrix4x4(a.x * a.x + (1.0 - a.x * a.x) * cos_t,\n a.x * a.y * (1.0 - cos_t) - a.z * sin_t,\n a.x * a.z * (1.0 - cos_t) + a.y * sin_t,\n 0.0,\n a.x * a.y * (1.0 - cos_t) + a.z * sin_t,\n a.y * a.y + (1.0 - a.y * a.y) * cos_t,\n a.y * a.z * (1.0 - cos_t) - a.x * sin_t,\n 0.0,\n a.x * a.z * (1.0 - cos_t) - a.y * sin_t,\n a.y * a.z * (1.0 - cos_t) + a.x * sin_t,\n a.z * a.z + (1.0 - a.z * a.z) * cos_t,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 1.0)\n return Transform(mat, transpose(mat))", "def axis_and_angle_of_rotation(self):\n if self.n_dims == 2:\n return self._axis_and_angle_of_rotation_2d()\n elif self.n_dims == 3:\n return self._axis_and_angle_of_rotation_3d()", "def angleAndAxisRotationFromQuaternion(*args):\n return _almathswig.angleAndAxisRotationFromQuaternion(*args)", "def _axis_and_angle_of_rotation_3d(self):\n eval_, evec = np.linalg.eig(self.rotation_matrix)\n real_eval_mask = np.isreal(eval_)\n real_eval = np.real(eval_[real_eval_mask])\n evec_with_real_eval = np.real_if_close(evec[:, real_eval_mask])\n error = 1e-7\n below_margin = np.abs(real_eval) < (1 + error)\n above_margin = (1 - error) < np.abs(real_eval)\n re_unit_eval_mask = np.logical_and(below_margin, above_margin)\n evec_with_real_unitary_eval = evec_with_real_eval[:, re_unit_eval_mask]\n # all the eigenvectors with real unitary eigenvalues are now all\n # equally 'valid' if multiple remain that probably means that this\n # rotation is actually a no op (i.e. rotate by 360 degrees about any\n # axis is an invariant transform) but need to check this. For now,\n # just take the first\n if evec_with_real_unitary_eval.shape[1] != 1:\n # TODO confirm that multiple eigenvalues of 1 means the rotation\n # does nothing\n return None, None\n axis = evec_with_real_unitary_eval[:, 0]\n axis /= np.sqrt((axis ** 2).sum()) # normalize to unit vector\n # to find the angle of rotation, build a new unit vector perpendicular\n # to the axis, and see how it rotates\n axis_temp_vector = axis - np.random.rand(axis.size)\n perpendicular_vector = np.cross(axis, axis_temp_vector)\n perpendicular_vector /= np.sqrt((perpendicular_vector ** 2).sum())\n transformed_vector = np.dot(self.rotation_matrix, perpendicular_vector)\n angle_of_rotation = np.arccos(np.dot(transformed_vector, perpendicular_vector))\n chirality_of_rotation = np.dot(\n axis, np.cross(perpendicular_vector, transformed_vector)\n )\n if chirality_of_rotation < 0:\n angle_of_rotation *= -1.0\n return axis, angle_of_rotation", "def get_angle_and_axis(self):\n # special case: no rotation\n if self == Rotation():\n angle = 0.\n axis = Vector((1., 0., 0.))\n return angle, axis\n\n # construct unit vector along rotation axis\n R = self.get_matrix()\n x = R[2, 1] - R[1, 2]\n y = R[0, 2] - R[2, 0]\n z = R[1, 0] - R[0, 1]\n axis = Vector((x, y, z)).unit_vector()\n\n # helper Vector\n w = Vector((z, x, y))\n\n # vector perpendicular to u\n v = axis.cross(w)\n\n # ========== magnitude =========================== #\n cos = 0.5 * (np.trace(R) - 1)\n acos = np.arccos(cos)\n\n # use sine to resolve ambiguity of acos\n # The following line of code is taken from\n # http://vhm.mathematik.uni-stuttgart.de/Vorlesungen/\n # Lineare_Algebra/Folien_Drehachse_und_Drehwinkel.pdf\n # (join the above two lines to get the proper URL)\n sin = v.dot(self.dot(axis))\n if sin < 0:\n angle = - acos\n else:\n angle = acos\n # ================================================ #\n\n 
return angle, axis", "def orientation_matrix(euler_angle):\n\n # Convert from degrees to radians\n phi1 = np.deg2rad(euler_angle[0])\n Phi = np.deg2rad(euler_angle[1])\n phi2 = np.deg2rad(euler_angle[2])\n\n # Assemble orientation matrix\n M = np.zeros([3, 3])\n M[0,0] = cos(phi1)*cos(phi2) - sin(phi1)*sin(phi2)*cos(Phi)\n M[0,1] = sin(phi1)*cos(phi2) + cos(phi1)*sin(phi2)*cos(Phi)\n M[0,2] = sin(phi2)*sin(Phi)\n M[1,0] = -cos(phi1)*sin(phi2) - sin(phi1)*cos(phi2)*cos(Phi)\n M[1,1] = -sin(phi1)*sin(phi2) + cos(phi1)*cos(phi2)*cos(Phi)\n M[1,2] = cos(phi2)*sin(Phi)\n M[2,0] = sin(phi1)*sin(Phi)\n M[2,1] = -cos(phi1)*sin(Phi)\n M[2,2] = cos(Phi)\n return M", "def rotation_matrix_2d(angle):\n\n return np.array([ [np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)] ])", "def generate_2D_rotation(angle):\n angle_rad = angle/180.*np.pi\n return np.array([[np.cos(angle_rad),-np.sin(angle_rad)],[np.sin(angle_rad),np.cos(angle_rad)]])", "def rotationFromAngleDirection(*args):\n return _almathswig.rotationFromAngleDirection(*args)", "def set_angle_and_axis(self, angle, axis):\n # special case: no rotation\n if angle == 0:\n self.matrix = np.eye(3)\n return self\n\n # special case: coordinate axis\n if axis in range(3):\n # delegate to sub-function\n co = np.zeros(3)\n co[axis] = 1.\n axis = Vector(co)\n return self.set_angle_and_axis(angle, axis)\n\n # input check\n if not isinstance(axis, Vector):\n raise TypeError('`axis` must be a Vector or an int between 0..2 .')\n\n # shortcuts\n sin = np.sin(angle)\n cos = np.cos(angle)\n\n # normalize\n norm = 1. * abs(axis)\n n = axis.get_car() / norm\n\n # build rotation matrix\n first = (1 - cos) * np.outer(n, n)\n diag = cos * np.eye(3)\n off = sin * np.array([[ 0., -n[2], n[1]],\n [ n[2], 0., -n[0]],\n [-n[1], n[0], 0.]])\n M = first + diag + off\n self.matrix = M\n return self", "def shear_matrix_from_angle(angle, ndim=3, axes=(-1, 0)):\n matrix = np.eye(ndim)\n matrix[axes] = np.tan(np.deg2rad(90 - angle))\n return matrix", "def rotate(self, angle: float, axis: str):\n angle *= np.pi / 180.0 # deg to rad\n sin = np.sin(angle)\n cos = np.cos(angle)\n axis = axis.lower()\n if axis == \"x\":\n rotation_matrix = [[1, 0, 0], [0, cos, -sin], [0, sin, cos]]\n elif axis == \"y\":\n rotation_matrix = [[cos, 0, sin], [0, 1, 0], [-sin, 0, cos]]\n elif axis == \"z\":\n rotation_matrix = [[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]]\n else:\n raise ValueError(\"axis should be 'x', 'y' or 'z', got {}\".format(\n axis))\n rotation_matrix = np.array(rotation_matrix)\n self.position = np.matmul(rotation_matrix, self.position)", "def rotation_matrix(self):\n return np.array([self.axis_u, self.axis_v, self.axis_w])", "def rotate(axis, angle, xyz):\n rot = rotationmatrix(axis, angle)\n if len(xyz.shape) == 1: # just one point\n if not len(xyz) == 3:\n raise ValueError(\"Points have to either be of shape (3,) or (n,3). \")\n return multiply(rot, xyz)\n if len(xyz.shape) == 2:\n # we assume xyz to be of shape n x 3\n if not xyz.shape[1] == 3:\n raise ValueError(\"Points have to either be of shape (3,) or (n,3). \")\n\n return matmul(rot, xyz.T).T\n\n raise ValueError(\"Points have to either be of shape (3,) or (n,3). 
\")", "def getRotationMatrix(angle, direction, point=None):\n sina = math.sin(angle)\n cosa = math.cos(angle)\n # rotation matrix around unit vector\n rot = np.diag([cosa, cosa, cosa])\n rot += np.outer(direction, direction) * (1.0 - cosa)\n direction *= sina\n rot += np.array([[0.0, -direction[2], direction[1]],\n [direction[2], 0.0, -direction[0]],\n [-direction[1], direction[0], 0.0]])\n matrix = np.identity(4)\n matrix[:3, :3] = rot\n if point is not None:\n # rotation not around origin\n point = np.array(point[:3], dtype=np.float64, copy=False)\n matrix[:3, 3] = point - np.dot(rot, point)\n return matrix", "def bondmat(axis, angle):\n g = rot2mat(axis = axis, angle = angle)\n M = np.array([[g[0,0]**2, g[0,1]**2, g[0,2]**2, 2.*g[0,1]*g[0,2], 2.*g[0,2]*g[0,0], 2.*g[0,0]*g[0,1]],\n [g[1,0]**2, g[1,1]**2, g[1,2]**2, 2.*g[1,1]*g[1,2], 2.*g[1,2]*g[1,0], 2.*g[1,0]*g[1,1]],\n [g[2,0]**2, g[2,1]**2, g[2,2]**2, 2.*g[2,1]*g[2,2], 2.*g[2,2]*g[2,0], 2.*g[2,0]*g[2,1]],\n [g[1,0]*g[2,0], g[1,1]*g[2,1], g[1,2]*g[2,2], g[1,1]*g[2,2]+g[1,2]*g[2,1], g[1,0]*g[2,2]+g[1,2]*g[2,0], g[1,1]*g[2,0]+g[1,0]*g[2,1]],\n [g[2,0]*g[0,0], g[2,1]*g[0,1], g[2,2]*g[0,2], g[0,1]*g[2,2]+g[0,2]*g[2,1], g[0,2]*g[2,0]+g[0,0]*g[2,2], g[0,0]*g[2,1]+g[0,1]*g[2,0]],\n [g[0,0]*g[1,0], g[0,1]*g[1,1], g[0,2]*g[1,2], g[0,1]*g[1,2]+g[0,2]*g[1,1], g[0,2]*g[1,0]+g[0,0]*g[1,2], g[0,0]*g[1,1]+g[0,1]*g[1,0]]\n ])\n return M", "def angle_to_euler(axis, angle=0., axes=XYZ): \n \n axis = _setDimension(axis,2)\n angle = _setDimension(angle,1) \n axes = _setDimension(axes,1,dtype=np.int32)\n axis, angle,axes = _matchDepth(axis, angle, axes)\n \n M = _axisAngleToMatrix(axis, angle)\n return _matrixToEuler(M, axes)", "def axis_calc(self, axis):\n # TODO: Rewrite this method to allow non-90deg planes to work\n # Figure out which axes the plane exists in\n axes = [1, 1, 1]\n axes[0] = (axis.v0.x - axis.v1.x - axis.v2.x) / 3.0\n axes[1] = (axis.v0.y - axis.v1.y - axis.v2.y) / 3.0\n axes[2] = (axis.v0.z - axis.v1.z - axis.v2.z) / 3.0\n # if axis.v0.x == axis.v1.x == axis.v2.x:\n # axes[0] = 0\n # if axis.v0.y == axis.v1.y == axis.v2.y:\n # axes[1] = 0\n # if axis.v0.z == axis.v1.z == axis.v2.z:\n # axes[2] = 0\n\n # Figure out uaxis xyz\n u = [0, 0, 0]\n for i in range(3):\n if axes[i] != 0.0:\n u[i] = axes[i]\n axes[i] = 0\n break\n\n # Figure out vaxis xyz\n v = [0, 0, 0]\n for i in range(3):\n if axes[i] != 0.0:\n v[i] = -axes[i]\n break\n\n uaxis = Axis(u[0], u[1], u[2])\n vaxis = Axis(v[0], v[1], v[2])\n return (uaxis, vaxis)", "def axisRotationProjection(*args):\n return _almathswig.axisRotationProjection(*args)", "def rotation_matrix_decompose(r):\n return numpy.array( (math.atan2(r[2][1],r[2][2]),\\\n math.atan2(-r[2][0],math.sqrt(r[2][1]*r[2][1]+r[2][2]*r[2][2])),\\\n math.atan2(r[1][0],r[0][0])))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the Rodrigues vector from the 3 Euler angles (in degrees).
def Euler2Rodrigues(euler):
    (phi1, Phi, phi2) = np.radians(euler)
    a = 0.5 * (phi1 - phi2)
    b = 0.5 * (phi1 + phi2)
    r1 = np.tan(0.5 * Phi) * np.cos(a) / np.cos(b)
    r2 = np.tan(0.5 * Phi) * np.sin(a) / np.cos(b)
    r3 = np.tan(b)
    return np.array([r1, r2, r3])
[ "def compute_angles(self):\n edges = self.edges().reshape(-1, 3, 2)\n vecs = np.diff(self.vertices[edges], axis=2)[:, :, 0]\n vecs = util.normalize(vecs)\n angles = np.arccos(-util.dot(vecs[:, [1, 2, 0]], vecs[:, [2, 0, 1]]))\n assert np.allclose(angles.sum(axis=1), np.pi, rtol=1e-3)\n return angles", "def getrxryrz(u):\n c2s1 = -u[1,2]\n c2c1 = u[2,2]\n r1 = -np.arctan2(c2s1,c2c1)\n c2c3 = u[0,0]\n c2s3 = -u[0,1]\n r3 = -np.arctan2( c2s3, c2c3 )\n s2 = u[0,2]\n if abs(np.sin(r3)) > 0.5:\n c2 = c2s3 / np.sin(r3)\n else:\n c2 = c2c3 / np.cos(r3)\n r2 = -np.arctan2( s2, c2 )\n if 1:\n utest = np.dot(np.dot(rotmatx(r1),rotmaty(r2)),rotmatz(r3))\n assert abs(utest-u).ravel().sum() < 1e-10\n return r1,r2,r3", "def _applyEuler(self, vector, eulerRot):\n # https://www.cs.utexas.edu/~theshark/courses/cs354/lectures/cs354-14.pdf\n eulerRot_rad = np.deg2rad(eulerRot)\n # X \n vector_x = np.copy(vector)\n vector_x[1] = vector[1] * np.cos(eulerRot_rad[0]) - vector[2] * np.sin(eulerRot_rad[0])\n vector_x[2] = vector[1] * np.sin(eulerRot_rad[0]) + vector[2] * np.cos(eulerRot_rad[0])\n\n # Y\n vector_y = np.copy(vector_x)\n vector_y[0] = vector_x[0] * np.cos(eulerRot_rad[1]) + vector_x[2] * np.sin(eulerRot_rad[1])\n vector_y[2] = -vector_x[0] * np.sin(eulerRot_rad[1]) + vector_x[2] * np.cos(eulerRot_rad[1])\n\n # Z\n vector_z = np.copy(vector_y)\n vector_z[0] = vector_y[0] * np.cos(eulerRot_rad[2]) - vector_y[1] * np.sin(eulerRot_rad[2])\n vector_z[1] = vector_y[0] * np.sin(eulerRot_rad[2]) + vector_y[1] * np.cos(eulerRot_rad[2])\n\n return vector_z", "def test_generate_rotation_rotor_and_angle(self):\n from clifford.tools.g3 import generate_rotation_rotor, random_unit_vector, angle_between_vectors\n\n euc_vector_m = random_unit_vector()\n euc_vector_n = random_unit_vector()\n theta = angle_between_vectors(euc_vector_m, euc_vector_n)\n print(theta)\n\n rot_rotor = generate_rotation_rotor(theta, euc_vector_m, euc_vector_n)\n v1 = euc_vector_m\n v2 = rot_rotor*euc_vector_m*~rot_rotor\n theta_return = angle_between_vectors(v1, v2)\n print(theta_return)\n\n testing.assert_almost_equal(theta_return, theta)\n testing.assert_almost_equal(euc_vector_n.value, v2.value)", "def quaternion_from_euler_angles(yaw, pitch, roll):\n #Roll = phi, pitch = theta, yaw = psi\n return (cos(roll/2)*cos(pitch/2)*cos(yaw/2) + sin(roll/2)*sin(pitch/2)*sin(yaw/2),\n sin(roll/2)*cos(pitch/2)*cos(yaw/2) - cos(roll/2)*sin(pitch/2)*sin(yaw/2),\n cos(roll/2)*sin(pitch/2)*cos(yaw/2) + sin(roll/2)*cos(pitch/2)*sin(yaw/2),\n cos(roll/2)*cos(pitch/2)*sin(yaw/2) - sin(roll/2)*sin(pitch/2)*cos(yaw/2))", "def rotations_to_radians(rotations):\n return np.pi * 2 * rotations", "def rod_to_u(rodriguez_vector):\n r = n.asarray(rodriguez_vector, float)\n g = n.zeros((3, 3))\n r2 = n.dot(r , r)\n\n for i in range(3):\n for j in range(3):\n if i == j:\n fac = 1\n else:\n fac = 0\n term = 0\n for k in range(3):\n if [i, j, k] == [0, 1, 2] or \\\n [i, j, k] == [1, 2, 0] or \\\n [i, j, k] == [2, 0, 1]:\n sign = 1\n elif [i, j, k] == [2, 1, 0] or \\\n [i, j, k] == [0, 2, 1] or \\\n [i, j, k] == [1, 0, 2]:\n sign = -1\n else:\n sign = 0\n term = term + 2*sign*r[k]\n g[i, j] = 1/(1+r2) * ((1-r2)*fac + 2*r[i]*r[j] - term)\n return n.transpose(g)", "def angle_vector(self):\n from math import atan2, pi\n return (atan2(self.y, self.x)) / pi * 180", "def rotvec(base, angles):\n # print base, angles\n rotangles = np.array([asin(angles[0]) * 2, asin(angles[1]) * 2, asin(angles[2]) * 2])\n rotmx = ([1, 0, 0], [0, cos(rotangles[0]), -sin(rotangles[0])], [0, 
sin(rotangles[0]), cos(rotangles[0])])\n rotmy = ([cos(rotangles[1]), 0, sin(rotangles[1])], [0, 1, 0], [-sin(rotangles[1]), 0, cos(rotangles[1])])\n rotmz = ([cos(rotangles[2]), -sin(rotangles[2]), 0], [sin(rotangles[2]), cos(rotangles[2]), 0], [0, 0, 1])\n\n step1 = np.dot(base, rotmx)\n step2 = np.dot(step1, rotmy)\n step3 = np.dot(step2, rotmz)\n # print step3\n return step3, (rotangles/(2*pi))*360", "def get_rpy(self):\n quat = self._state.pose.orientation\n # Edited python3 code\n rpy = euler_from_quaternion([quat.x, quat.y, quat.z, quat.w])\n # Initial python2 code\n # rpy = transformations.euler_from_quaternion([quat.x, quat.y, quat.z, quat.w])\n return rpy", "def polar2vect(delta, phi):\n import numpy as np\n x = np.cos(phi)\n y = np.sin(phi)\n z = np.sin(delta)\n vector = np.array([x,y,z])\n vector = vector / np.linalg.norm(vector)\n return vector", "def rodrigues(z,r,j):\n z /= np.linalg.norm(z)\n z0 = np.zeros(3)\n if np.any(z == 0):\n z0[z == 0] = 1\n else:\n z0[0] = 2/z[0]\n z0[1] = -1/z[1]\n z0[2] = -1/z[2]\n z0 /= np.linalg.norm(z0)\n z0 *= r\n z0 = z + z0\n z0 /= np.linalg.norm(z0)\n B = np.cross(z,z0)\n C = np.dot(z,z0)*z\n directions = np.zeros((j,3))\n for i in range(j):\n x = 2*np.pi*(i+1)/j\n directions[i,:] = z0*np.cos(x)+B*np.sin(x)+C*(1-np.cos(x))\n return directions", "def angle_rad(u, v):\n a = u / np.linalg.norm(u, 2)\n b = v / np.linalg.norm(v, 2)\n return np.arctan2(\n a[0] * b[1] - a[1] * b[0],\n a[0] * b[0] + a[1] * b[1],\n )", "def haversinrad(angle):\n return ((1.0 - math.cos(angle))/2.0)", "def radial_to_vector(magnitude, direction, orientation='to'):\n assert orientation in ['from', 'to']\n v = np.cos(direction) * magnitude\n u = np.sin(direction) * magnitude\n if orientation == \"from\":\n v = -v\n u = -u\n return u, v", "def get_angles(vectors):\n angles = []\n for i in range(len(vectors)-2):\n angles.append(calc_angle(vectors[i],\n vectors[i+1],\n vectors[i+2]))\n return angles", "def vector_ang(u, th):\n\n idx = [[1,0,0],[0,1,0],[0,0,1]]\n r = np.zeros((3,3))\n\n pi = np.pi\n ct = np.cos(th * pi / 180.)\n st = np.sin(th * pi / 180.)\n\n cm = crossop(u)\n for i in range(3):\n for j in range(3):\n r[i][j] = idx[i][j] * ct + st * cm[i][j] +\\\n (1 - ct) * u[i] * u[j]\n return r", "def view_angle(self):\n view_i = -self.Ri[2,:].T\n view_j = -self.Rj[2,:].T\n return np.arccos(np.dot(view_i.T, view_j))", "def inverse_euler(angles):\n sin_angles = torch.sin(angles)\n cos_angles = torch.cos(angles)\n sz, sy, sx = torch.unbind(-sin_angles, axis=-1)\n cz, _, cx = torch.unbind(cos_angles, axis=-1)\n y = torch.asin((cx * sy * cz) + (sx * sz))\n x = -torch.asin((sx * sy * cz) - (cx * sz)) / torch.cos(y)\n z = -torch.asin((cx * sy * sz) - (sx * cz)) / torch.cos(y)\n return torch.stack([x, y, z], dim=-1)", "def Rbody2nav_to_angle(R, output_units='rad', rotation_sequence='321'):\n yaw = np.arctan2(R[1,0], R[0,0])\n #pitch = -np.arctan(R[2,0] / np.sqrt(1.-R[2,0]**2)) # Farrel eqn 2.45\n pitch = -np.arcsin(R[2,0]) # this is simpler\n roll = np.arctan2(R[2,1], R[2,2] )\n \n # Apply necessary unit transformations.\n if output_units == 'rad':\n pass\n elif output_units == 'deg':\n yaw, pitch, roll = np.degrees([yaw, pitch, roll])\n \n return yaw, pitch, roll" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a set of Euler angles from an ASCII file.
def read_euler_txt(txt_path):
    return Orientation.read_orientations(txt_path)
[ "def read_euler(self):\n data = self.bus.read_i2c_block_data(self.address, 0x1A, 6)\n return self.parse_axis(data, 16)", "def read_ascii(fn):\n try:\n inputfile = open(fn, 'r')\n except Exception as error:\n print(error)\n return\n lines = inputfile.readlines()\n inputfile.close()\n return lines", "def load_triangle(filename):\n values = list()\n with open(filename) as f:\n next_line = f.readline()\n while next_line:\n values.extend(map(lambda x: int(x), next_line.split(' ')))\n next_line = f.readline()\n return values", "def input_energies(file_):\n f = open(file_,\"r\")\n names = []\n stoich = []\n energies = []\n for line in f:\n line = line.split()\n names.append(line[0])\n energies.append(float(line[-1]))\n stoich.append([])\n for i in range(1,len(line)-1):\n stoich[-1].append(float(line[i]))\n phases = []\n for i in range(len(names)):\n phases.append(Phase(stoich[i],energies[i]))\n #return names,stoich,energies\n return phases", "def _read_elliptic_files(self):\n file_1_2 = os.path.join(\n mm.DATA_PATH, 'interpolate_elliptic_integral_1_2.dat')\n file_3 = os.path.join(\n mm.DATA_PATH, 'interpolate_elliptic_integral_3.dat')\n\n (x, y1, y2) = np.loadtxt(file_1_2, unpack=True)\n PointLens._interpolate_1 = interp1d(np.log10(x), y1, kind='cubic')\n PointLens._interpolate_2 = interp1d(np.log10(x), y2, kind='cubic')\n PointLens._interpolate_1_2_x_min = np.min(np.log10(x))\n PointLens._interpolate_1_2_x_max = np.max(np.log10(x))\n\n with open(file_3) as file_in:\n for line in file_in.readlines():\n if line[:3] == \"# X\":\n xx = np.array([float(t) for t in line.split()[2:]])\n if line[:3] == \"# Y\":\n yy = np.array([float(t) for t in line.split()[2:]])\n pp = np.loadtxt(file_3)\n PointLens._interpolate_3 = interp2d(xx, yy, pp.T, kind='cubic')\n PointLens._interpolate_3_min_x = np.min(xx)\n PointLens._interpolate_3_max_x = np.max(xx)\n PointLens._interpolate_3_min_y = np.min(yy)\n PointLens._interpolate_3_max_y = np.max(yy)\n\n PointLens._elliptic_files_read = True", "def sun_ang(sun_file):\n angles = []\n with open(sun_file,\"r\",encoding='utf-8') as sun_info:\n logger.debug(\"Opened file: %s\", sun_file)\n for line in sun_info:\n if \"ANGLE\" in line:\n x = line.split('\"deg\">')\n logger.debug(x)\n x = x[1].split('</')\n logger.debug(x)\n angles = np.append(angles,float(x[0]))\n logger.info(\"Sun zenith and azimuth angles: %s\", angles)\n return angles #Returns zenith and azimuth angles respectively", "def nai_detector_angles():\n\n # angles listed as [azimuth, zenith]\n detectors = {\n \"n0\": [45.89 * u.deg, 20.58 * u.deg],\n \"n1\": [45.11 * u.deg, 45.31 * u.deg],\n \"n2\": [58.44 * u.deg, 90.21 * u.deg],\n \"n3\": [314.87 * u.deg, 45.24 * u.deg],\n \"n4\": [303.15 * u.deg, 90.27 * u.deg],\n \"n5\": [3.35 * u.deg, 89.79 * u.deg],\n \"n6\": [224.93 * u.deg, 20.43 * u.deg],\n \"n7\": [224.62 * u.deg, 46.18 * u.deg],\n \"n8\": [236.61 * u.deg, 89.97 * u.deg],\n \"n9\": [135.19 * u.deg, 45.55 * u.deg],\n \"n10\": [123.73 * u.deg, 90.42 * u.deg],\n \"n11\": [183.74 * u.deg, 90.32 * u.deg],\n }\n\n return detectors", "def read_qe(in_name):\n with open(in_name) as file_qe:\n content = file_qe.readlines()\n\n last_pos = 0\n for line in content[::-1]:\n if \"ATOMIC_POSITIONS\" in line.split():\n last_pos = content[::-1].index(line)\n break\n\n atoms = []\n for line in content[-last_pos:]:\n if line == \"End final coordinates\\n\":\n break\n elem, xPos, yPos, zPos = line.split()\n atom_2_add = Atom(elem, xPos, yPos, zPos, 0)\n atoms.append(atom_2_add)\n return atoms", "def 
read_molcas(in_name):\n with open(in_name) as data:\n lines = data.readlines()\n\n grad = np.array([])\n ex_energy = None\n gr_energy = None\n\n reading = False\n\n for line in lines:\n if line.strip():\n # Energies\n if \"RASSCF root number 1 Total energy:\" in line:\n gr_energy = float(line.split()[-1])\n if \"RASSCF root number 2 Total energy:\" in line:\n ex_energy = float(line.split()[-1])\n # Gradients\n if \"Molecular gradients\" in line:\n reading = True\n if reading:\n if len(line.split()) == 4 and line.split()[0][0].isalpha():\n nums = [float(i) for i in line.split()[1:]]\n grad = np.concatenate((grad, nums))\n if not ex_energy:\n ex_energy = gr_energy\n return ex_energy, grad, gr_energy", "def get_anchors(anchors_path):\r\n with open(anchors_path) as f:\r\n anchors = f.readline()\r\n anchors = [np.float32(x) for x in anchors.split(',')]\r\n anchors = np.array(anchors).reshape(-1, 2)\r\n return anchors", "def read_aev(fname):\n\n try:\n f = open(fname, \"r\")\n except IOError:\n print(\"Could not open file:\" + fname)\n sys.exit()\n with f:\n aevd = f.readlines()\n\n n_line = len(aevd)\n npt = int(aevd[0])\n n_atom = int(aevd[1])\n dout = int(aevd[2])\n\n aev = [ [ [0]*dout for a in range(n_atom)] for p in range(npt)]\n line = 3\n for p in range(npt):\n \tfor a in range(n_atom):\n \t\tfor i in range(dout):\n \t\t\taev[p][a][i]=float(aevd[line])\n \t\t\tline += 1\n return npt, n_atom, dout, aev", "def import_ascii(file_path=None):\n\n if file_path is None:\n file_path = askopenfilename(title='Select AFM image ASCII file', filetypes=((\"ASCII files\", \"*.asc\"),))\n file_name = file_path.split('/')[-1]\n f = open(file_path, 'r')\n\n # Read each line, discriminate between header line and height value line by checking if the\n # content of the first entry of the line is a digit or not\n img = []\n for line in f:\n try:\n first_entry = line.strip().split()[0][-5:]\n meas_par = line.split()[1]\n\n if first_entry.isdigit() or first_entry[-5:-3] == 'e-' or first_entry[-4:-2] == 'e-':\n line = line.strip()\n floats = [float(x) for x in line.split()]\n img.append(np.asarray(floats))\n\n # Find the required measurement information\n elif meas_par == 'x-pixels':\n x_pixels = float(line.split()[-1])\n\n # Find the required measurement information\n elif meas_par == 'y-pixels':\n y_pixels = float(line.split()[-1])\n\n elif meas_par == 'x-length':\n x_length = float(line.split()[-1])\n\n except IndexError:\n pass\n\n if 'x_pixels' not in locals():\n x_pixels = 'unknown'\n print('The amount of x-pixels was not found in the header')\n\n if 'y_pixels' not in locals():\n y_pixels = 'unknown'\n print('The amount of y-pixels was not found in the header')\n\n if 'x_length' not in locals():\n x_length = 'unknown'\n print('The size of the image was not found in the header')\n\n img = np.asarray(img)\n img_meta_data = {'file_name': file_name,\n 'file_path': file_path,\n 'x_pixels': x_pixels,\n 'x_length': x_length,\n 'y_pixels': y_pixels,\n 'pixel_size': x_length/x_pixels}\n\n return np.asarray(img), img_meta_data", "def read_orientation_file(path):\n rotationdf = pd.read_csv(\n path,\n sep=' ',\n index_col=0,\n names=['strip', 'direction'],\n header=None\n )\n rotationdf['direction'] = rotationdf['direction'].astype(int)\n return rotationdf", "def read_file(file_path):\n numbers = io.read_numbers_from_file(file_path)\n for each in numbers:\n print each", "def read_polar(infile):\n\n regex = re.compile('(?:\\s*([+-]?\\d*.\\d*))')\n\n with open(infile) as f:\n lines = f.readlines()\n\n a = []\n cl = []\n 
cd = []\n cdp = []\n cm = []\n xtr_top = []\n xtr_bottom = []\n\n for line in lines[12:]:\n linedata = regex.findall(line)\n a.append(float(linedata[0]))\n cl.append(float(linedata[1]))\n cd.append(float(linedata[2]))\n cdp.append(float(linedata[3]))\n cm.append(float(linedata[4]))\n xtr_top.append(float(linedata[5]))\n xtr_bottom.append(float(linedata[6]))\n\n data = {'a': np.array(a), 'cl': np.array(cl), 'cd': np.array(cd), 'cdp': np.array(cdp),\n 'cm': np.array(cm), 'xtr_top': np.array(xtr_top), 'xtr_bottom': np.array(xtr_bottom)}\n\n return data", "def from_ascii(self, filename, date=True, compression=None, verbose=False):\n #-- set filename\n self.case_insensitive_filename(filename)\n print(self.filename) if verbose else None\n #-- open the ascii file and extract contents\n if (compression == 'gzip'):\n #-- read input ascii data from gzip compressed file and split lines\n with gzip.open(self.filename,'r') as f:\n file_contents = f.read().decode('ISO-8859-1').splitlines()\n elif (compression == 'zip'):\n #-- read input ascii data from zipped file and split lines\n base,extension = os.path.splitext(self.filename)\n with zipfile.ZipFile(self.filename) as z:\n file_contents = z.read(base).decode('ISO-8859-1').splitlines()\n else:\n #-- read input ascii file (.txt, .asc) and split lines\n with open(self.filename,'r') as f:\n file_contents = f.read().splitlines()\n #-- compile regular expression operator for extracting numerical values\n #-- from input ascii files of spherical harmonics\n regex_pattern = r'[-+]?(?:(?:\\d*\\.\\d+)|(?:\\d+\\.?))(?:[EeD][+-]?\\d+)?'\n rx = re.compile(regex_pattern, re.VERBOSE)\n #-- find maximum degree and order of harmonics\n self.lmax = 0\n self.mmax = 0\n #-- for each line in the file\n for line in file_contents:\n if date:\n l1,m1,clm1,slm1,time = rx.findall(line)\n else:\n l1,m1,clm1,slm1 = rx.findall(line)\n #-- convert line degree and order to integers\n l1,m1 = np.array([l1,m1],dtype=np.int)\n self.lmax = np.copy(l1) if (l1 > self.lmax) else self.lmax\n self.mmax = np.copy(m1) if (m1 > self.mmax) else self.mmax\n #-- output spherical harmonics dimensions array\n self.l = np.arange(self.lmax+1)\n self.m = np.arange(self.mmax+1)\n #-- output spherical harmonics data\n self.clm = np.zeros((self.lmax+1,self.mmax+1))\n self.slm = np.zeros((self.lmax+1,self.mmax+1))\n #-- if the ascii file contains date variables\n if date:\n self.time = np.float(time)\n self.month = np.int(12.0*(self.time - 2002.0)) + 1\n #-- extract harmonics and convert to matrix\n #-- for each line in the file\n for line in file_contents:\n if date:\n l1,m1,clm1,slm1,time = rx.findall(line)\n else:\n l1,m1,clm1,slm1 = rx.findall(line)\n #-- convert line degree and order to integers\n ll,mm = np.array([l1,m1],dtype=np.int)\n #-- convert fortran exponentials if applicable\n self.clm[ll,mm] = np.float(clm1.replace('D','E'))\n self.slm[ll,mm] = np.float(slm1.replace('D','E'))\n #-- assign shape and ndim attributes\n self.update_dimensions()\n return self", "def readIMPACT(filename='test.in'):\n file = open(filename,'r')\n lines = file.readlines()\n file.close()\n row_end = len(lines)\n row_start = 11\n for i in range(0,row_end):\n if lines[i][:12] == '!==lattice==':\n row_start=i\n break\n lattice=str2lattice(lines[row_start+1:])\n updateLattice(lattice)\n beam=str2beam(lines[0:row_start])\n return beam, lattice", "def read_elcentro(scale=1., total_time=40):\n input_dict = {'S90W': ['USACA47.035.txt', 45, 381]}\n comp = 'S90W'\n dt, freq = 0.02, 50 # dt [s], freq [Hz]\n with 
open(input_dict[comp][0], 'r') as f:\n accel = []\n for i, line in enumerate(f):\n if (i > input_dict[comp][1]) and (i < input_dict[comp][2]):\n row = line.replace('\\n', '').split()\n accel.extend([-scale * float(r) for r in row])\n time_vec = np.linspace(0., len(accel) * dt, len(accel)+1)\n return time_vec[:freq*total_time+1], np.array(accel)[:freq*total_time+1]", "def loadsir(filename):\n\n fid = open(filename)\n data_types = dtype(\"int16\").newbyteorder(\">\")\n data_typec = dtype(\"int8\").newbyteorder(\">\")\n data_typef = dtype(\"float32\").newbyteorder(\">\")\n\n # read header\n head = double(fromfile(fid, dtype=data_types, count=256, sep=\"\"))\n\n nhtype = head[4]\n if nhtype < 20:\n nhtype = 1.0\n head[4] = 1.0\n\n nhead = head[40]\n if nhtype == 1:\n nhead = 1.0\n head[40] = 1.0\n head[41] = 0.0\n head[42] = 0.0\n head[43] = 0.0\n\n ndes = head[41]\n ldes = head[42]\n nia = head[43]\n idatatype = head[47]\n iopt = head[16] # transformation option\n\n if nhtype < 30: # old header format\n # set version 3.0 parameters to header version 2.0 defaults\n if iopt == -1: # image only\n ideg_sc = 10.0\n iscale_sc = 1000.0\n i0_sc = 100.0\n ixdeg_off = 0.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n elif iopt == 0: # rectalinear lat/lon\n ideg_sc = 100.0\n iscale_sc = 1000.0\n i0_sc = 100.0\n ixdeg_off = -100.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n elif (iopt == 1) or (iopt == 2): # lambert\n ideg_sc = 100.0\n iscale_sc = 1000.0\n i0_sc = 1.0\n ixdeg_off = 0.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n elif iopt == 5: # polar stereographic\n ideg_sc = 100.0\n iscale_sc = 100.0\n i0_sc = 1.0\n ixdeg_off = -100.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n elif (iopt == 8) or (iopt == 9) or (iopt == 10): # EASE2 grid\n ideg_sc = 10.0\n iscale_sc = 1000.0\n i0_sc = 1.0\n ixdeg_off = 0.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n elif (iopt == 11) or (iopt == 12) or (iopt == 13): # EASE grid\n ideg_sc = 10.0\n iscale_sc = 1000.0\n i0_sc = 10.0\n ixdeg_off = 0.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n else: # unknown default scaling\n ideg_sc = 100.0\n iscale_sc = 1000.0\n i0_sc = 100.0\n ixdeg_off = 0.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n\n head[39] = iscale_sc\n head[126] = ixdeg_off\n head[127] = iydeg_off\n head[168] = ideg_sc\n head[189] = ia0_off\n head[240] = ib0_off\n head[255] = i0_sc\n else: # get projection parameters offset and scale factors\n iscale_sc = head[39]\n ixdeg_off = head[126]\n iydeg_off = head[127]\n ideg_sc = head[168]\n ia0_off = head[189]\n ib0_off = head[240]\n i0_sc = head[255]\n\n # decode projection transformation\n xdeg = head[2] / ideg_sc - ixdeg_off\n ydeg = head[3] / ideg_sc - iydeg_off\n ascale = head[5] / iscale_sc\n bscale = head[6] / iscale_sc\n a0 = head[7] / i0_sc - ia0_off\n b0 = head[8] / i0_sc - ib0_off\n # get special cases which depend on transformation option\n if iopt == -1: # image only\n pass\n elif iopt == 0: # rectalinear lat/lon\n pass\n elif (iopt == 1) or (iopt == 2): # lambert\n ascale = iscale_sc / head[5]\n bscale = iscale_sc / head[6]\n elif iopt == 5: # polar stereographic\n pass\n elif (iopt == 8) or (iopt == 9) or (iopt == 10): # EASE2 grid\n pass\n elif (iopt == 11) or (iopt == 12) or (iopt == 13): # EASE grid\n ascale = 2.0 * (head[5] / iscale_sc) * 6371.228 / 25.067525\n bscale = 2.0 * (head[6] / iscale_sc) * 25.067525\n else: # unknown default scaling\n print(\"*** Unrecognized SIR option in loadsir ***\")\n\n head[2] = xdeg\n head[3] = ydeg\n head[5] 
= ascale\n head[6] = bscale\n head[7] = a0\n head[8] = b0\n\n if head[10] == 0: # iscale\n head[10] = 1.0\n\n s = 1.0 / head[10]\n soff = 32767.0 / head[10]\n if idatatype == 1:\n soff = 128.0 / head[10]\n\n ioff = head[9]\n anodata = head[48] * s + ioff + soff\n vmin = head[49] * s + ioff + soff\n vmax = head[50] * s + ioff + soff\n\n if idatatype == 4: # floating point file -- very rare\n # fid.close()\n fid2 = open(filename)\n fromfile(fid2, dtype=data_types, count=51, sep=\"\")\n fl = double(fromfile(fid2, dtype=data_typef, count=3, sep=\"\"))\n fid2.close()\n # fid = file(filename)\n # fromfile(fid,dtype=data_types,count=256,sep=\"\")\n anodata = fl[0]\n vmin = fl[1]\n vmax = fl[2]\n\n head[45] = head[45] * 0.1\n head[48] = anodata\n head[49] = vmin\n head[50] = vmax\n\n descrip = []\n iaopt = []\n\n if nhead > 1:\n if ndes > 0:\n descrip = double(fromfile(fid, dtype=data_typec, count=ndes * 512, sep=\"\"))\n descrip = transpose(descrip[1:ldes])\n m, n = descrip.shape\n for j in range(1, n / 2 + 1):\n k = (j - 1) * 2 + 1\n t = descrip[k - 1]\n descrip[k - 1] = descrip[k]\n descrip[k] = t\n if nia > 0:\n nia1 = 256.0 * ceil(nia / 256)\n iaopt = double(fromfile(fid, dtype=data_types, count=nia1, sep=\"\"))\n iaopt = transpose(iaopt[1:nia])\n # read image data\n\n if idatatype == 1: # very rare\n # disp(['Read byte data: ' num2str(head(1)) ' x ' num2str(head(2))]);\n im_in = double(\n fromfile(fid, dtype=data_typec, count=int(head[0] * head[1]), sep=\"\")\n ) # read byte image data\n image = flipud(\n reshape(s * im_in + soff + ioff, (head[1], head[0]), order=\"C\")\n ) # scale data to floating point and\n # change origin location\n elif idatatype == 4: # rare\n # disp(['Read float data: ' num2str(head(1)) ' x ' num2str(head(2))]);\n im_in = double(\n fromfile(fid, dtype=data_typef, count=int(head[0] * head[1]), sep=\"\")\n )\n image = flipud(\n reshape(im_in, (head[1], head[0]), order=\"C\")\n ) # read floating point data\n else: # most commonly used\n # disp(['Read integer data: ' num2str(head(1)) ' x ' num2str(head(2))]);\n im_in = double(\n fromfile(fid, dtype=data_types, count=int(head[0] * head[1]), sep=\"\")\n ) # read integer image data\n image = flipud(\n reshape(s * im_in + soff + ioff, (int(head[1]), int(head[0])), order=\"C\")\n ) # scale data to floating point and\n # change origin location for display\n\n if nhtype == 1: # if old-style header, set default values\n vmin = min(image.flatten(1))\n vmax = max(image.flatten(1))\n anodata = vmin\n head[48] = anodata\n head[49] = vmin\n head[50] = vmax\n if vmin == -32:\n head[18] = 1.0\n elif vmin == -3.2:\n head[18] = 2.0\n\n head[44] = 2.0\n head[45] = 53.0\n\n fid.close()\n return image, head, descrip, iaopt", "def read_asc_grid(filename, footer=0):\r\n\r\n ncols = None\r\n nrows = None\r\n xllcorner = None\r\n xllcenter = None\r\n yllcorner = None\r\n yllcenter = None\r\n cellsize = None\r\n dx = None\r\n dy = None\r\n no_data = None\r\n header_lines = 0\r\n with io.open(filename, 'r') as f:\r\n while True:\r\n string, value = f.readline().split()\r\n header_lines += 1\r\n if string.lower() == 'ncols':\r\n ncols = int(value)\r\n elif string.lower() == 'nrows':\r\n nrows = int(value)\r\n elif string.lower() == 'xllcorner':\r\n xllcorner = float(value)\r\n elif string.lower() == 'xllcenter':\r\n xllcenter = float(value)\r\n elif string.lower() == 'yllcorner':\r\n yllcorner = float(value)\r\n elif string.lower() == 'yllcenter':\r\n yllcenter = float(value)\r\n elif string.lower() == 'cellsize':\r\n cellsize = float(value)\r\n 
elif string.lower() == 'cell_size':\r\n cellsize = float(value)\r\n elif string.lower() == 'dx':\r\n dx = float(value)\r\n elif string.lower() == 'dy':\r\n dy = float(value)\r\n elif string.lower() == 'nodata_value':\r\n no_data = float(value)\r\n elif string.lower() == 'nodatavalue':\r\n no_data = float(value)\r\n else:\r\n raise IOError(\"could not read *.asc file. Error in header.\")\r\n\r\n if (ncols is not None) and \\\r\n (nrows is not None) and \\\r\n (((xllcorner is not None) and (yllcorner is not None)) or\r\n ((xllcenter is not None) and (yllcenter is not None))) and \\\r\n ((cellsize is not None) or ((dx is not None) and (dy is not None))) and \\\r\n (no_data is not None):\r\n break\r\n\r\n raw_grid_array = np.genfromtxt(filename, skip_header=header_lines,\r\n skip_footer=footer)\r\n grid_array = np.flipud(raw_grid_array)\r\n\r\n if nrows != grid_array.shape[0] or ncols != grid_array.shape[1]:\r\n raise IOError(\"Error reading *.asc file. Encountered problem \"\r\n \"with header: NCOLS and/or NROWS does not match \"\r\n \"number of columns/rows in data file body.\")\r\n\r\n if xllcorner is not None and yllcorner is not None:\r\n if dx is not None and dy is not None:\r\n xllcenter = xllcorner + dx/2.0\r\n yllcenter = yllcorner + dy/2.0\r\n else:\r\n xllcenter = xllcorner + cellsize/2.0\r\n yllcenter = yllcorner + cellsize/2.0\r\n\r\n if dx is not None and dy is not None:\r\n x = np.arange(xllcenter, xllcenter + ncols*dx, dx)\r\n y = np.arange(yllcenter, yllcenter + nrows*dy, dy)\r\n else:\r\n x = np.arange(xllcenter, xllcenter + ncols*cellsize, cellsize)\r\n y = np.arange(yllcenter, yllcenter + nrows*cellsize, cellsize)\r\n\r\n # Sometimes x and y and can be an entry too long due to imprecision\r\n # in calculating the upper cutoff for np.arange(); this bit takes care of\r\n # that potential problem.\r\n if x.size == ncols + 1:\r\n x = x[:-1]\r\n if y.size == nrows + 1:\r\n y = y[:-1]\r\n\r\n if cellsize is None:\r\n cellsize = (dx, dy)\r\n\r\n return grid_array, x, y, cellsize, no_data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a set of grain orientations from a text file. The text file must be organised in 3 columns (any other columns are ignored), corresponding to either the three Euler angles or the three Rodrigues vector components, depending on data_type. Internally, the ASCII file is read by the genfromtxt function of numpy; additional keywords (such as the delimiter) can be passed via the kwargs dictionary.
def read_orientations(txt_path, data_type='euler', **kwargs):
    # read the raw data; kwargs (e.g. the delimiter) are passed straight to genfromtxt
    data = np.genfromtxt(txt_path, **kwargs)
    size = len(data)
    orientations = []
    for i in range(size):
        # only the first three columns are used
        angles = np.array([float(data[i, 0]), float(data[i, 1]), float(data[i, 2])])
        if data_type == 'euler':
            orientations.append([i + 1, Orientation.from_euler(angles)])
        elif data_type == 'rodrigues':
            orientations.append([i + 1, Orientation.from_rodrigues(angles)])
    # return a dictionary keyed by the 1-based line index
    return dict(orientations)
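A minimal usage sketch of read_orientations above, added for illustration and not part of the original record: the file name, its two rows of Euler angles, and the assumption that numpy (as np) and the Orientation class are already importable in the surrounding module are all hypothetical.

# Hypothetical example: file name and contents are made up; assumes np and Orientation
# are in scope exactly as the function above assumes them.
with open('my_orientations.txt', 'w') as f:   # illustrative file name
    f.write('10.0 45.0 120.0\n')              # phi1 Phi phi2 for grain 1
    f.write('250.0 30.0 70.0\n')              # phi1 Phi phi2 for grain 2
grain_orientations = read_orientations('my_orientations.txt', data_type='euler')
print(sorted(grain_orientations.keys()))      # -> [1, 2]: grains are keyed by 1-based row index

Because the returned dictionary is keyed by the 1-based row index, its entries can be matched directly against grain ids that follow the same numbering.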
[ "def read_txt_grains(fname):\n\n # Note: (21) fields named below with an underscore are not yet used\n #\n # Fields from grains.out header:\n \"\"\"grain ID completeness chi2\n xi[0] xi[1] xi[2]\n tVec_c[0] tVec_c[1] tVec_c[2]\n vInv_s[0] vInv_s[1] vInv_s[2] vInv_s[4]*sqrt(2) vInv_s[5]*sqrt(2) vInv_s[6]*sqrt(2)\n ln(V[0,0]) ln(V[1,1]) ln(V[2,2]) ln(V[1,2]) ln(V[0,2]) ln(V[0,1])\"\"\"\n\n # Use shortened names in construction of numpy data type.\n\n d = {'names': ('id', 'completeness', 'chisq',\n 'ori_0', 'ori_1', 'ori_2',\n 'cen_0', 'cen_1', 'cen_2',\n 'vi0', 'vi1', 'vi2', 'vi3', 'vi4', 'vi5',\n 'lnV00', 'lnV11', 'lnV22', 'lnV12', 'lnV02', 'lnV01'),\n 'formats': ('i4',) + 20*('f4',)}\n\n return np.loadtxt(fname, dtype=d)", "def read_orientation_file(path):\n rotationdf = pd.read_csv(\n path,\n sep=' ',\n index_col=0,\n names=['strip', 'direction'],\n header=None\n )\n rotationdf['direction'] = rotationdf['direction'].astype(int)\n return rotationdf", "def load_gait_txt(filename, num_phases=2):\n\n data = np.loadtxt(filename, delimiter=',', dtype='float32')\n\n data_shape = data.shape\n\n num_coeffs = data_shape[1]\n\n # m*n rows of bezier coefficients followed by m rows of phase duration info\n # m is num of phases\n # n is num of outputs\n num_outputs = NUM_MOTORS # one outputs for each motor dof.\n\n vircons = np.zeros([num_phases, num_outputs, num_coeffs])\n phasevars = np.zeros([num_phases, 1])\n\n for phase in range(num_phases):\n vircons[phase, :, :] = data[phase * num_outputs:(phase + 1) *\n num_outputs, :num_coeffs]\n phasevars[phase] = data[num_outputs * num_phases + phase, 0]\n\n return vircons, phasevars", "def from_grain_file(grain_file_path, col_id=0, col_phi1=1, col_phi=2, col_phi2=3, col_x=4, col_y=5, col_z=None, col_volume=None):\n # get the file name without extension\n name = os.path.splitext(os.path.basename(grain_file_path))[0]\n print('creating microstructure %s' % name)\n micro = Microstructure(name=name)\n\n # read grain infos from the grain file\n grains_EBSD = np.genfromtxt(grain_file_path)\n for i in range(len(grains_EBSD)):\n o = Orientation.from_euler([grains_EBSD[i, col_phi1], grains_EBSD[i, col_phi], grains_EBSD[i, col_phi2]])\n g = Grain(int(grains_EBSD[i, col_id]), o)\n z = grains_EBSD[i, col_z] if col_z else 0.\n g.position = np.array([grains_EBSD[i, col_x], grains_EBSD[i, col_y], z])\n if col_volume:\n g.volume = grains_EBSD[i, col_volume]\n micro.grains.append(g)\n return micro", "def _read(self, datafile):\n inData = open(datafile, 'r')\n x = []\n y = []\n z = []\n\n # First read till end of header\n inData.readline()\n\n for line in inData:\n columns = line.split()\n x.append(columns[0])\n y.append(columns[1])\n z.append(columns[2])\n\n self.x,self.y,self.z = np.asarray(x), np.asarray(y), np.asarray(z)\n inData.close()", "def read_asc_grid(filename, footer=0):\r\n\r\n ncols = None\r\n nrows = None\r\n xllcorner = None\r\n xllcenter = None\r\n yllcorner = None\r\n yllcenter = None\r\n cellsize = None\r\n dx = None\r\n dy = None\r\n no_data = None\r\n header_lines = 0\r\n with io.open(filename, 'r') as f:\r\n while True:\r\n string, value = f.readline().split()\r\n header_lines += 1\r\n if string.lower() == 'ncols':\r\n ncols = int(value)\r\n elif string.lower() == 'nrows':\r\n nrows = int(value)\r\n elif string.lower() == 'xllcorner':\r\n xllcorner = float(value)\r\n elif string.lower() == 'xllcenter':\r\n xllcenter = float(value)\r\n elif string.lower() == 'yllcorner':\r\n yllcorner = float(value)\r\n elif string.lower() == 'yllcenter':\r\n yllcenter 
= float(value)\r\n elif string.lower() == 'cellsize':\r\n cellsize = float(value)\r\n elif string.lower() == 'cell_size':\r\n cellsize = float(value)\r\n elif string.lower() == 'dx':\r\n dx = float(value)\r\n elif string.lower() == 'dy':\r\n dy = float(value)\r\n elif string.lower() == 'nodata_value':\r\n no_data = float(value)\r\n elif string.lower() == 'nodatavalue':\r\n no_data = float(value)\r\n else:\r\n raise IOError(\"could not read *.asc file. Error in header.\")\r\n\r\n if (ncols is not None) and \\\r\n (nrows is not None) and \\\r\n (((xllcorner is not None) and (yllcorner is not None)) or\r\n ((xllcenter is not None) and (yllcenter is not None))) and \\\r\n ((cellsize is not None) or ((dx is not None) and (dy is not None))) and \\\r\n (no_data is not None):\r\n break\r\n\r\n raw_grid_array = np.genfromtxt(filename, skip_header=header_lines,\r\n skip_footer=footer)\r\n grid_array = np.flipud(raw_grid_array)\r\n\r\n if nrows != grid_array.shape[0] or ncols != grid_array.shape[1]:\r\n raise IOError(\"Error reading *.asc file. Encountered problem \"\r\n \"with header: NCOLS and/or NROWS does not match \"\r\n \"number of columns/rows in data file body.\")\r\n\r\n if xllcorner is not None and yllcorner is not None:\r\n if dx is not None and dy is not None:\r\n xllcenter = xllcorner + dx/2.0\r\n yllcenter = yllcorner + dy/2.0\r\n else:\r\n xllcenter = xllcorner + cellsize/2.0\r\n yllcenter = yllcorner + cellsize/2.0\r\n\r\n if dx is not None and dy is not None:\r\n x = np.arange(xllcenter, xllcenter + ncols*dx, dx)\r\n y = np.arange(yllcenter, yllcenter + nrows*dy, dy)\r\n else:\r\n x = np.arange(xllcenter, xllcenter + ncols*cellsize, cellsize)\r\n y = np.arange(yllcenter, yllcenter + nrows*cellsize, cellsize)\r\n\r\n # Sometimes x and y and can be an entry too long due to imprecision\r\n # in calculating the upper cutoff for np.arange(); this bit takes care of\r\n # that potential problem.\r\n if x.size == ncols + 1:\r\n x = x[:-1]\r\n if y.size == nrows + 1:\r\n y = y[:-1]\r\n\r\n if cellsize is None:\r\n cellsize = (dx, dy)\r\n\r\n return grid_array, x, y, cellsize, no_data", "def read(self,data_path=\"./\",file_name='gauges.data'):\n path = os.path.join(data_path, file_name)\n gauge_file = open(path,'r')\n\n # Read past comments and blank lines\n header_lines = 0\n ignore_lines = True\n while ignore_lines:\n line = gauge_file.readline()\n if line[0] == \"#\" or len(line.strip()) == 0:\n header_lines += 1\n else:\n break\n\n # Read number of gauges, should be line that was last read in\n num_gauges = int(line.split()[0])\n\n # Read in each gauge line\n for n in xrange(num_gauges):\n line = gauge_file.readline().split()\n self.gauges.append([int(line[0]),float(line[1]),float(line[2]),\n float(line[3]),float(line[4])])", "def read_geometry_file(path_to_file):\n logger.info(\"Reading geometry file.\")\n with open(path_to_file) as f:\n lines = f.readlines()\n\n vec_x = lines[3].split()\n vec_y = lines[4].split()\n vec_z = lines[5].split()\n\n vec_x = [float(vec_x[i]) for i in range(1, len(vec_x))]\n vec_y = [float(vec_y[i]) for i in range(1, len(vec_y))]\n vec_z = [float(vec_z[i]) for i in range(1, len(vec_z))]\n\n vectors = [vec_x, vec_y, vec_z]\n uc_atoms = []\n for i in range(6, len(lines)):\n sl = lines[i].split()\n x = float(sl[1])\n y = float(sl[2])\n z = float(sl[3])\n t = sl[4]\n\n if sl[4] == \"Ga\":\n c = ga_mass\n elif sl[4] == \"Al\":\n c = al_mass\n elif sl[4] == \"In\":\n c = in_mass\n elif sl[4] == \"O\":\n c = o_mass\n\n global_atom_types[t] = global_atom_types[t] + 
1\n\n a = Atom(x, y, z, t, c)\n uc_atoms.append(a)\n logger.info(\"Geomtery file read.\")\n # uc_atoms = UCAtoms(uc_atoms)\n\n return vectors, uc_atoms", "def load_position_file(self, file_name, fourier_flag = 0, conv_to_rads = 0, indices = None):\r\n values = []\r\n try:\r\n f = open(file_name, \"r\")\r\n index = 0\r\n last_time = 0\r\n last_px = 0\r\n last_py = 0\r\n last_pz = 0\r\n last_vx = 0\r\n last_vy = 0\r\n last_vz = 0\r\n last_roll = 0\r\n last_pitch = 0\r\n last_yaw = 0\r\n for line in f:\r\n if(index == 0):\r\n index += 1\r\n continue\r\n line_val = line.split()\r\n if(not self.isfloat(line_val[0])):\r\n continue\r\n #print(line_val)\r\n wp = float(line_val[0])\r\n time = float(line_val[1])\r\n px = float(line_val[2])\r\n py = float(line_val[3])\r\n pz = float(line_val[4])\r\n roll = float(line_val[5])\r\n pitch = float(line_val[6])\r\n yaw = float(line_val[7])\r\n if(conv_to_rads == 1):\r\n roll = roll/180 * np.pi\r\n pitch = pitch/180 * np.pi\r\n yaw = yaw/180 * np.pi\r\n if(last_time == 0 or (time-last_time) == 0): \r\n values.append(np.array([wp, time\r\n , px, py, pz\r\n , roll, pitch, yaw\r\n , 0, 0, 0\r\n , 0, 0, 0\r\n , 0, 0, 0]))\r\n \r\n else: \r\n #print(\"here\")\r\n vx = (px - last_px)/(time - last_time)\r\n vy = (py - last_py)/(time - last_time)\r\n vz = (pz - last_pz)/(time - last_time)\r\n ax = (vx - last_vx)/(time - last_time)\r\n ay = (vy - last_vy)/(time - last_time)\r\n az = (vz - last_vz)/(time - last_time)\r\n r_dot = (roll - last_roll)/(time - last_time)\r\n p_dot = (pitch - last_pitch)/(time - last_time)\r\n y_dot = (yaw - last_yaw)/(time - last_time)\r\n values.append(np.array([wp, time\r\n , px, py, pz\r\n , roll, pitch, yaw\r\n , vx, vy, vz\r\n , r_dot, p_dot, y_dot\r\n , ax, ay, az])) \r\n last_vx = vx\r\n last_vy = vy\r\n last_vz = vz\r\n last_time = time\r\n last_px = px\r\n last_py = py\r\n last_pz = pz\r\n last_roll = roll\r\n last_pitch = pitch\r\n last_yaw = yaw\r\n index += 1\r\n if(indices is None):\r\n start_index = 0\r\n end_index = len(values)\r\n else:\r\n start_index = indices[0]\r\n end_index = indices[1]\r\n values = np.array(values).T[:,start_index:end_index]\r\n # Get the final movement index. Assume for that the following:\r\n # The position is constant after that index\r\n # This means that change in position is minimal\r\n # Furthermore, change of change is also minimal\r\n # This obviously doesn't work if the platform stands still for \r\n # a while. But it does the job if a vibration is applied to the\r\n # system, to cut off non relevant parts for the fourier transform\r\n # Is probably too fickle. 
Use manual trimming instead.\r\n# values_rel = values / np.max(np.abs(values),1).reshape(values.shape[0],1)\r\n# d_new_time, d_values = self.get_time_derivative(values[0,:], values)\r\n# d_values_rel = d_values / np.max(np.abs(d_values), 1).reshape((d_values.shape[0],1))\r\n# d_time_matched, d_values_rel_matched = self.interpolate_to_array(values[0,:], d_new_time, d_values_rel)\r\n# dd_new_time, dd_values = self.get_time_derivative(d_time_matched, d_values_rel_matched)\r\n# dd_values_rel = dd_values / np.max(np.abs(dd_values), 1).reshape(dd_values.shape[0],1)\r\n# dd_time_matched, dd_values_rel_matched = self.interpolate_to_array(values[0,:], dd_new_time, d_values_rel)\r\n# end_indices = np.argmin(np.abs(dd_values_rel_matched)\r\n# + np.abs(d_values_rel_matched)\r\n# + np.abs(values_rel)\r\n# - np.abs(values_rel[:,-1]).reshape((values.shape[0],1)), 1)\r\n \r\n# print(end_indices)\r\n# end_index = np.max(end_indices)\r\n# print(end_index)\r\n finally:\r\n f.close()\r\n if(fourier_flag == 1):\r\n# val_for_fourier = values[:,0:end_index]\r\n y_k, x_hz, y_k_abs, y_k_phase = self.calculate_fourier_transforms(values.T).T\r\n return values, x_hz, np.array(y_k), y_k_abs, y_k_phase\r\n else:\r\n return values", "def genfromtxt(fname, dtype=float, comments='#', delimiter=None,\r\n skiprows=0, skip_header=0, skip_footer=0, converters=None,\r\n missing='', missing_values=None, filling_values=None,\r\n usecols=None, names=None, excludelist=None, deletechars=None,\r\n autostrip=False, case_sensitive=True, defaultfmt=\"f%i\",\r\n unpack=None, usemask=False, loose=True, invalid_raise=True):\r\n #\r\n if usemask:\r\n from numpy.ma import MaskedArray, make_mask_descr\r\n # Check the input dictionary of converters\r\n user_converters = converters or {}\r\n if not isinstance(user_converters, dict):\r\n errmsg = \"The input argument 'converter' should be a valid dictionary \"\\\r\n \"(got '%s' instead)\"\r\n raise TypeError(errmsg % type(user_converters))\r\n\r\n # Initialize the filehandle, the LineSplitter and the NameValidator\r\n if isinstance(fname, basestring):\r\n fhd = np.lib._datasource.open(fname)\r\n elif not hasattr(fname, 'read'):\r\n raise TypeError(\"The input should be a string or a filehandle. 
\"\\\r\n \"(got %s instead)\" % type(fname))\r\n else:\r\n fhd = fname\r\n split_line = LineSplitter(delimiter=delimiter, comments=comments,\r\n autostrip=autostrip)._handyman\r\n validate_names = NameValidator(excludelist=excludelist,\r\n deletechars=deletechars,\r\n case_sensitive=case_sensitive)\r\n\r\n # Get the first valid lines after the first skiprows ones ..\r\n if skiprows:\r\n warnings.warn(\"The use of `skiprows` is deprecated.\\n\"\\\r\n \"Please use `skip_header` instead.\",\r\n DeprecationWarning)\r\n skip_header = skiprows\r\n # Skip the first `skip_header` rows\r\n for i in xrange(skip_header):\r\n fhd.readline()\r\n # Keep on until we find the first valid values\r\n first_values = None\r\n while not first_values:\r\n first_line = fhd.readline()\r\n if first_line == '':\r\n raise IOError('End-of-file reached before encountering data.')\r\n if names is True:\r\n if comments in first_line:\r\n first_line = ''.join(first_line.split(comments)[1])\r\n first_values = split_line(first_line)\r\n # Should we take the first values as names ?\r\n if names is True:\r\n fval = first_values[0].strip()\r\n if fval in comments:\r\n del first_values[0]\r\n\r\n # Check the columns to use\r\n if usecols is not None:\r\n try:\r\n usecols = [_.strip() for _ in usecols.split(\",\")]\r\n except AttributeError:\r\n try:\r\n usecols = list(usecols)\r\n except TypeError:\r\n usecols = [usecols, ]\r\n nbcols = len(usecols or first_values)\r\n\r\n # Check the names and overwrite the dtype.names if needed\r\n if names is True:\r\n names = validate_names([_.strip() for _ in first_values])\r\n first_line = ''\r\n elif _is_string_like(names):\r\n names = validate_names([_.strip() for _ in names.split(',')])\r\n elif names:\r\n names = validate_names(names)\r\n # Get the dtype\r\n if dtype is not None:\r\n dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)\r\n names = dtype.names\r\n # Make sure the names is a list (for 2.5)\r\n if names is not None:\r\n names = list(names)\r\n\r\n\r\n if usecols:\r\n for (i, current) in enumerate(usecols):\r\n # if usecols is a list of names, convert to a list of indices\r\n if _is_string_like(current):\r\n usecols[i] = names.index(current)\r\n elif current < 0:\r\n usecols[i] = current + len(first_values)\r\n # If the dtype is not None, make sure we update it\r\n if (dtype is not None) and (len(dtype) > nbcols):\r\n descr = dtype.descr\r\n dtype = np.dtype([descr[_] for _ in usecols])\r\n names = list(dtype.names)\r\n # If `names` is not None, update the names\r\n elif (names is not None) and (len(names) > nbcols):\r\n names = [names[_] for _ in usecols]\r\n\r\n\r\n # Process the missing values ...............................\r\n # Rename missing_values for convenience\r\n user_missing_values = missing_values or ()\r\n\r\n # Define the list of missing_values (one column: one list)\r\n missing_values = [list(['']) for _ in range(nbcols)]\r\n\r\n # We have a dictionary: process it field by field\r\n if isinstance(user_missing_values, dict):\r\n # Loop on the items\r\n for (key, val) in user_missing_values.items():\r\n # Is the key a string ?\r\n if _is_string_like(key):\r\n try:\r\n # Transform it into an integer\r\n key = names.index(key)\r\n except ValueError:\r\n # We couldn't find it: the name must have been dropped, then\r\n continue\r\n # Redefine the key as needed if it's a column number\r\n if usecols:\r\n try:\r\n key = usecols.index(key)\r\n except ValueError:\r\n pass\r\n # Transform the value as a list of string\r\n if isinstance(val, (list, 
tuple)):\r\n val = [str(_) for _ in val]\r\n else:\r\n val = [str(val), ]\r\n # Add the value(s) to the current list of missing\r\n if key is None:\r\n # None acts as default\r\n for miss in missing_values:\r\n miss.extend(val)\r\n else:\r\n missing_values[key].extend(val)\r\n # We have a sequence : each item matches a column\r\n elif isinstance(user_missing_values, (list, tuple)):\r\n for (value, entry) in zip(user_missing_values, missing_values):\r\n value = str(value)\r\n if value not in entry:\r\n entry.append(value)\r\n # We have a string : apply it to all entries\r\n elif isinstance(user_missing_values, basestring):\r\n user_value = user_missing_values.split(\",\")\r\n for entry in missing_values:\r\n entry.extend(user_value)\r\n # We have something else: apply it to all entries\r\n else:\r\n for entry in missing_values:\r\n entry.extend([str(user_missing_values)])\r\n\r\n # Process the deprecated `missing`\r\n if missing != '':\r\n warnings.warn(\"The use of `missing` is deprecated.\\n\"\\\r\n \"Please use `missing_values` instead.\",\r\n DeprecationWarning)\r\n values = [str(_) for _ in missing.split(\",\")]\r\n for entry in missing_values:\r\n entry.extend(values)\r\n\r\n # Process the filling_values ...............................\r\n # Rename the input for convenience\r\n user_filling_values = filling_values or []\r\n # Define the default\r\n filling_values = [None] * nbcols\r\n # We have a dictionary : update each entry individually\r\n if isinstance(user_filling_values, dict):\r\n for (key, val) in user_filling_values.items():\r\n if _is_string_like(key):\r\n try:\r\n # Transform it into an integer\r\n key = names.index(key)\r\n except ValueError:\r\n # We couldn't find it: the name must have been dropped, then\r\n continue\r\n # Redefine the key if it's a column number and usecols is defined\r\n if usecols:\r\n try:\r\n key = usecols.index(key)\r\n except ValueError:\r\n pass\r\n # Add the value to the list\r\n filling_values[key] = val\r\n # We have a sequence : update on a one-to-one basis\r\n elif isinstance(user_filling_values, (list, tuple)):\r\n n = len(user_filling_values)\r\n if (n <= nbcols):\r\n filling_values[:n] = user_filling_values\r\n else:\r\n filling_values = user_filling_values[:nbcols]\r\n # We have something else : use it for all entries\r\n else:\r\n filling_values = [user_filling_values] * nbcols\r\n\r\n # Initialize the converters ................................\r\n if dtype is None:\r\n # Note: we can't use a [...]*nbcols, as we would have 3 times the same\r\n # ... 
converter, instead of 3 different converters.\r\n converters = [StringConverter(None, missing_values=miss, default=fill)\r\n for (miss, fill) in zip(missing_values, filling_values)]\r\n else:\r\n dtype_flat = flatten_dtype(dtype, flatten_base=True)\r\n # Initialize the converters\r\n if len(dtype_flat) > 1:\r\n # Flexible type : get a converter from each dtype\r\n zipit = zip(dtype_flat, missing_values, filling_values)\r\n converters = [StringConverter(dt, locked=True,\r\n missing_values=miss, default=fill)\r\n for (dt, miss, fill) in zipit]\r\n else:\r\n # Set to a default converter (but w/ different missing values)\r\n zipit = zip(missing_values, filling_values)\r\n converters = [StringConverter(dtype, locked=True,\r\n missing_values=miss, default=fill)\r\n for (miss, fill) in zipit]\r\n # Update the converters to use the user-defined ones\r\n uc_update = []\r\n for (i, conv) in user_converters.items():\r\n # If the converter is specified by column names, use the index instead\r\n if _is_string_like(i):\r\n try:\r\n i = names.index(i)\r\n except ValueError:\r\n continue\r\n elif usecols:\r\n try:\r\n i = usecols.index(i)\r\n except ValueError:\r\n # Unused converter specified\r\n continue\r\n converters[i].update(conv, locked=True,\r\n default=filling_values[i],\r\n missing_values=missing_values[i],)\r\n uc_update.append((i, conv))\r\n # Make sure we have the corrected keys in user_converters...\r\n user_converters.update(uc_update)\r\n\r\n miss_chars = [_.missing_values for _ in converters]\r\n\r\n\r\n # Initialize the output lists ...\r\n # ... rows\r\n rows = []\r\n append_to_rows = rows.append\r\n # ... masks\r\n if usemask:\r\n masks = []\r\n append_to_masks = masks.append\r\n # ... invalid\r\n invalid = []\r\n append_to_invalid = invalid.append\r\n\r\n # Parse each line\r\n for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):\r\n values = split_line(line)\r\n nbvalues = len(values)\r\n # Skip an empty line\r\n if nbvalues == 0:\r\n continue\r\n # Select only the columns we need\r\n if usecols:\r\n try:\r\n values = [values[_] for _ in usecols]\r\n except IndexError:\r\n append_to_invalid((i, nbvalues))\r\n continue\r\n elif nbvalues != nbcols:\r\n append_to_invalid((i, nbvalues))\r\n continue\r\n # Store the values\r\n append_to_rows(tuple(values))\r\n if usemask:\r\n append_to_masks(tuple([v.strip() in m\r\n for (v, m) in zip(values, missing_values)]))\r\n\r\n # Strip the last skip_footer data\r\n if skip_footer > 0:\r\n rows = rows[:-skip_footer]\r\n if usemask:\r\n masks = masks[:-skip_footer]\r\n\r\n # Upgrade the converters (if needed)\r\n if dtype is None:\r\n for (i, converter) in enumerate(converters):\r\n current_column = map(itemgetter(i), rows)\r\n try:\r\n converter.iterupgrade(current_column)\r\n except ConverterLockError:\r\n errmsg = \"Converter #%i is locked and cannot be upgraded: \" % i\r\n current_column = itertools.imap(itemgetter(i), rows)\r\n for (j, value) in enumerate(current_column):\r\n try:\r\n converter.upgrade(value)\r\n except (ConverterError, ValueError):\r\n errmsg += \"(occurred line #%i for value '%s')\"\r\n errmsg %= (j + 1 + skip_header, value)\r\n raise ConverterError(errmsg)\r\n\r\n # Check that we don't have invalid values\r\n if len(invalid) > 0:\r\n nbrows = len(rows)\r\n # Construct the error message\r\n template = \" Line #%%i (got %%i columns instead of %i)\" % nbcols\r\n if skip_footer > 0:\r\n nbrows -= skip_footer\r\n errmsg = [template % (i + skip_header + 1, nb)\r\n for (i, nb) in invalid if i < nbrows]\r\n else:\r\n 
errmsg = [template % (i + skip_header + 1, nb)\r\n for (i, nb) in invalid]\r\n if len(errmsg):\r\n errmsg.insert(0, \"Some errors were detected !\")\r\n errmsg = \"\\n\".join(errmsg)\r\n # Raise an exception ?\r\n if invalid_raise:\r\n raise ValueError(errmsg)\r\n # Issue a warning ?\r\n else:\r\n warnings.warn(errmsg, ConversionWarning)\r\n\r\n # Convert each value according to the converter:\r\n # We want to modify the list in place to avoid creating a new one...\r\n# if loose:\r\n# conversionfuncs = [conv._loose_call for conv in converters]\r\n# else:\r\n# conversionfuncs = [conv._strict_call for conv in converters]\r\n# for (i, vals) in enumerate(rows):\r\n# rows[i] = tuple([convert(val)\r\n# for (convert, val) in zip(conversionfuncs, vals)])\r\n if loose:\r\n rows = zip(*(map(converter._loose_call, map(itemgetter(i), rows))\r\n for (i, converter) in enumerate(converters)))\r\n else:\r\n rows = zip(*(map(converter._strict_call, map(itemgetter(i), rows))\r\n for (i, converter) in enumerate(converters)))\r\n # Reset the dtype\r\n data = rows\r\n if dtype is None:\r\n # Get the dtypes from the types of the converters\r\n column_types = [conv.type for conv in converters]\r\n # Find the columns with strings...\r\n strcolidx = [i for (i, v) in enumerate(column_types)\r\n if v in (type('S'), np.string_)]\r\n # ... and take the largest number of chars.\r\n for i in strcolidx:\r\n column_types[i] = \"|S%i\" % max(len(row[i]) for row in data)\r\n #\r\n if names is None:\r\n # If the dtype is uniform, don't define names, else use ''\r\n base = set([c.type for c in converters if c._checked])\r\n if len(base) == 1:\r\n (ddtype, mdtype) = (list(base)[0], np.bool)\r\n else:\r\n ddtype = [(defaultfmt % i, dt)\r\n for (i, dt) in enumerate(column_types)]\r\n if usemask:\r\n mdtype = [(defaultfmt % i, np.bool)\r\n for (i, dt) in enumerate(column_types)]\r\n else:\r\n ddtype = zip(names, column_types)\r\n mdtype = zip(names, [np.bool] * len(column_types))\r\n output = np.array(data, dtype=ddtype)\r\n if usemask:\r\n outputmask = np.array(masks, dtype=mdtype)\r\n else:\r\n # Overwrite the initial dtype names if needed\r\n if names and dtype.names:\r\n dtype.names = names\r\n # Case 1. We have a structured type\r\n if len(dtype_flat) > 1:\r\n # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]\r\n # First, create the array using a flattened dtype:\r\n # [('a', int), ('b1', int), ('b2', float)]\r\n # Then, view the array using the specified dtype.\r\n if 'O' in (_.char for _ in dtype_flat):\r\n if has_nested_fields(dtype):\r\n errmsg = \"Nested fields involving objects \"\\\r\n \"are not supported...\"\r\n raise NotImplementedError(errmsg)\r\n else:\r\n output = np.array(data, dtype=dtype)\r\n else:\r\n rows = np.array(data, dtype=[('', _) for _ in dtype_flat])\r\n output = rows.view(dtype)\r\n # Now, process the rowmasks the same way\r\n if usemask:\r\n rowmasks = np.array(masks,\r\n dtype=np.dtype([('', np.bool)\r\n for t in dtype_flat]))\r\n # Construct the new dtype\r\n mdtype = make_mask_descr(dtype)\r\n outputmask = rowmasks.view(mdtype)\r\n # Case #2. 
We have a basic dtype\r\n else:\r\n # We used some user-defined converters\r\n if user_converters:\r\n ishomogeneous = True\r\n descr = []\r\n for (i, ttype) in enumerate([conv.type for conv in converters]):\r\n # Keep the dtype of the current converter\r\n if i in user_converters:\r\n ishomogeneous &= (ttype == dtype.type)\r\n if ttype == np.string_:\r\n ttype = \"|S%i\" % max(len(row[i]) for row in data)\r\n descr.append(('', ttype))\r\n else:\r\n descr.append(('', dtype))\r\n # So we changed the dtype ?\r\n if not ishomogeneous:\r\n # We have more than one field\r\n if len(descr) > 1:\r\n dtype = np.dtype(descr)\r\n # We have only one field: drop the name if not needed.\r\n else:\r\n dtype = np.dtype(ttype)\r\n #\r\n output = np.array(data, dtype)\r\n if usemask:\r\n if dtype.names:\r\n mdtype = [(_, np.bool) for _ in dtype.names]\r\n else:\r\n mdtype = np.bool\r\n outputmask = np.array(masks, dtype=mdtype)\r\n # Try to take care of the missing data we missed\r\n names = output.dtype.names\r\n if usemask and names:\r\n for (name, conv) in zip(names or (), converters):\r\n missing_values = [conv(_) for _ in conv.missing_values if _ != '']\r\n for mval in missing_values:\r\n outputmask[name] |= (output[name] == mval)\r\n # Construct the final array\r\n if usemask:\r\n output = output.view(MaskedArray)\r\n output._mask = outputmask\r\n if unpack:\r\n return output.squeeze().T\r\n return output.squeeze()", "def read_from_txt(filenames=None, path=None):\n if isinstance(filenames, str):\n filenames = [filenames]\n \n # for reading tab delimited float data:\n string_to_array = lambda string: np.array([float(s) for s in string.split('\\t')])\n strip_end_tabs = lambda string: re.findall(r'^(.*?)[\\t]*$',string)[0]\n \n scans = []\n for filename in filenames:\n print('importing ' + filename) \n with open(filename, 'r') as f:\n lines = f.readlines()\n for i in range(len(lines)):\n if len(re.findall(r'Interferometer Scan', lines[i])) > 0:\n \n # import and format the date\n date = datetime.strptime(\n strip_end_tabs(lines[i + 1]),\n '%A, %B %d, %Y\\t%I:%M:%S %p'\n )\n \n x_position = float(lines[i + 3])\n y_position = float(lines[i + 5])\n \n arm_positions = string_to_array(lines[i + 7])\n fringe_contrasts = string_to_array(lines[i + 9])\n \n gaussian = string_to_array(strip_end_tabs(lines[i + 14]))\n labview_gaussian_fit = GaussianFit(amplitude = gaussian[0],\n damplitude = gaussian[1],\n center = gaussian[2],\n dcenter = gaussian[3],\n width = gaussian[4],\n dwidth = gaussian[5])\n \n scans.append(InterferometerData(date, x_position, y_position,\n arm_positions, fringe_contrasts,\n labview_gaussian_fit))\n return scans", "def readIntervalsFromFile(filePath,dataType):\n return numpy.genfromtxt(filePath, delimiter=',', dtype=dataType)", "def load_byu(fname, arbitrary=False):\n if not arbitrary:\n try:\n with open(fname) as f:\n for i, line in enumerate(f):\n if i == 0:\n # the first gives info about file\n _, nv, nf, _ = [int(n) for n in line.split()]\n vertices = np.empty((3, nv), dtype=float)\n faces = np.empty((nf, 3), dtype=int)\n continue\n elif i == 1:\n continue\n if i <= nv + 1:\n vertices[:, i - 2] = [float(n) for n in line.split()]\n else:\n faces[i - (nv + 2), :] = [np.abs(int(n)) -\n 1 for n in line.split()]\n return vertices, faces\n except:\n print(\"File is in original .byu format\")\n return load_byu(fname, arbitrary=True)\n else:\n face_list = []\n with open(fname) as f:\n for i, line in enumerate(f):\n if i == 0:\n # the first gives info about file\n vals = [int(n) for n in 
line.split()]\n ns, nv, nf, ne = vals[0], vals[1], vals[2], vals[3]\n vertices = np.empty((3, nv), dtype=float)\n faces = np.empty((nf, 3), dtype=int)\n\n nv_count, nf_count = 0, 0\n continue\n elif i >= 1 and i <= ns:\n # the next ns lines define surfaces\n continue\n elif i > ns and i <= ns + -(-nv // 2):\n vals = line.split()\n vertices[:, 2 * (i - ns - 1)] = [float(n)\n for n in vals[:3]]\n if len(vals) > 3:\n vertices[:, 2 * (i - ns - 1) + 1] = [float(n)\n for n in vals[3:]]\n else:\n vals = [abs(int(n)) - 1 for n in line.split()]\n face_list.extend(vals)\n\n face_list = np.asarray(face_list)\n faces = face_list.reshape((nf, 3))\n\n return vertices, faces", "def add_grain_file(self, filename):\r\n f = open(filename, 'r')\r\n fin = f.read()\r\n grains = fin.split(\"grain,\")\r\n for i in grains:\r\n grain = i.split(\",\")\r\n if grain[0] != '':\r\n self.add_grain(float(grain[0]), float(grain[1]))\r\n f.close()", "def _readHorizon(self, filename):\n self._file = HorizonFile(filename, 'rb')\n self._header = self._file.readHeader()\n\n if self._header == b\"#GeoProbe Horizon V2.0 ascii\\n\":\n raise TypeError('Ascii horizons not currently supported')\n elif self._header != b\"#GeoProbe Horizon V2.0 binary\\n\":\n raise TypeError('This does not appear to be a valid geoprobe'\\\n ' horizon')\n\n self.data = self._file.readAll()\n\n # Surface and line attributes\n self.surface = self._file.surface\n self.lines = self._file.lines\n\n # Oddly enough, Geoprobe (the actual Landmark application) seems to\n # do this a lot...\n # Raise the error here to avoid problems down the road!\n if self.data.size == 0:\n raise ValueError('This file does not contain any points!')", "def readFromFile(self, infile, ignore_strand=False):\n\n self.mForwardRegions = {}\n self.mReverseRegions = {}\n self.mRegions = []\n self.mIgnoreStrand = ignore_strand\n n = 0\n for line in infile:\n if line[0] == \"#\":\n continue\n\n token, sbjct_token, sbjct_strand, sbjct_from, sbjct_to = line[\n :-1].split(\"\\t\")[:5]\n\n if ignore_strand:\n key = sbjct_token\n else:\n key = \"%s-%s\" % (sbjct_token, sbjct_strand)\n\n if key not in self.mForwardRegions:\n self.mForwardRegions[key] = []\n self.mReverseRegions[key] = []\n\n self.mForwardRegions[key].append((int(sbjct_from), n))\n self.mReverseRegions[key].append((int(sbjct_to), n))\n self.mRegions.append((token, sbjct_from, sbjct_to))\n n += 1\n\n for k, v in self.mForwardRegions.items():\n v.sort()\n self.mForwardRegions[k] = (map(lambda x: x[0], v),\n map(lambda x: x[1], v))\n\n for k, v in self.mReverseRegions.items():\n v.sort()\n self.mReverseRegions[k] = (map(lambda x: x[0], v),\n map(lambda x: x[1], v))", "def read(self, patfile, read_celltypes):\r\n import re\r\n\r\n def get_points(line):\r\n line = line.strip()\r\n points = re.findall(r\"[-\\d\\.]+E...\", line)\r\n points = [float(point) for point in points]\r\n return points\r\n\r\n def get_cell(line, num_points, pointsID):\r\n line = line.strip()\r\n cell = re.findall(r\"[\\d]+\", line)[:num_points]\r\n cell = [pointsID[int(point)] for point in cell]\r\n return cell\r\n\r\n meshio_to_patran_type = {\r\n \"line\": 2,\r\n \"triangle\": 3,\r\n \"quad\": 4,\r\n \"tetra\": 5,\r\n \"hexahedron\": 8,\r\n }\r\n\r\n patran_to_meshio_type = {}\r\n assert len(read_celltypes) > 0\r\n for celltype in read_celltypes:\r\n patran_to_meshio_type[meshio_to_patran_type[celltype]] = celltype\r\n\r\n # Read patran file\r\n f = open(patfile, \"r\")\r\n lines = f.read()\r\n lines = lines.replace(\" \", \",\")\r\n for _ in range(15):\r\n 
lines = lines.replace(\",,\", \",\")\r\n\r\n # Read points\r\n self.pointsID = {}\r\n self.points = []\r\n pointlines = re.findall(r\"\\n(,1,[\\d,]+\\n[,\\d.EG\\+-]+\\n1[G,\\d]+)\", lines)\r\n for i, n in enumerate(pointlines):\r\n self.pointsID[int(n.split(\"\\n\")[0].split(\",\")[2])] = i\r\n self.points.append(get_points(n.split(\"\\n\")[1]))\r\n self.points = np.array(self.points)\r\n\r\n # Read cells\r\n self.cellsID = {}\r\n self.cells = {}\r\n celllines = re.findall(r\"\\n,2,([\\d,E]+\\n[\\d\\.\\+,E]+\\n[\\d,E]+)\", lines)\r\n for e in celllines:\r\n celltype = int(e.split(\",\")[1])\r\n num_points = int(e.split(\"\\n\")[1].split(\",\")[1])\r\n if celltype not in patran_to_meshio_type:\r\n continue\r\n meshio_type = patran_to_meshio_type[celltype]\r\n cellID = int(e.split(\",\")[0])\r\n cell = get_cell(e.split(\"\\n\")[2], num_points, self.pointsID)\r\n if meshio_type in self.cellsID:\r\n self.cellsID[meshio_type].append(cellID)\r\n self.cells[meshio_type].append(cell)\r\n else:\r\n self.cellsID[meshio_type] = [cellID]\r\n self.cells[meshio_type] = [cell]\r\n\r\n for key in self.cells:\r\n self.cells[key] = np.array(self.cells[key], dtype=int)\r\n self.cellsID[key] = np.array(self.cellsID[key], dtype=int)\r\n\r\n self.point_data = {}\r\n self.cell_data = {}\r\n self.field_data = {}", "def readOptics(direction,nSkip=1,fileLoc=''):\n if direction == 'x':\n file = open(fileLoc+'fort.24','r')\n elif direction == 'z':\n file = open(fileLoc+'fort.26','r')\n else :\n file = open(fileLoc+'fort.25','r')\n lines = file.readlines()\n file.close()\n f=[]\n ph = 0\n z0 = 0\n for j in range(0,len(lines),nSkip) :\n z, sigmax, sigmap, alpha, emittance_norm = [ float(lines[j].split()[i]) for i in [0,2,4,5,6] ]\n beta=(1+alpha*alpha)**0.5 *sigmax/sigmap\n ph = ph + (z-z0)/beta\n if direction == 'z':\n f.append( [beta, alpha, emittance_norm, ph, z] )\n else:\n f.append( [beta, alpha, emittance_norm, ph, z] )\n z0=z\n return np.array(f)", "def _read_text(self, file_to_read, delimiter=' ', index=False):\n self._data = pd.read_csv(file_to_read, delimiter=delimiter, index_col=index) \n #np.genfromtxt(file_to_read, dtype='float', names=True) # this is WAY slower\n self._col_names = self._data.columns", "def read_input_file():\n global rows\n global cols\n global maze\n\n with open(filename, \"r\") as fout:\n a = fout.readline()\n rows, cols = tuple(a.split())\n rows = int(rows)\n cols = int(cols)\n\n maze = fout.readlines()\n maze = [list(r.strip(\" \\n\")) for r in maze]\n maze = np.matrix(maze, dtype=str)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a set of grain orientations from a zset input file. In zset input files, the orientation data may be specified either by the rotation of two vectors, by Euler angles, or by Rodrigues components directly. For instance, the following lines are extracted from a polycrystalline calculation file
def read_euler_from_zset_inp(inp_path):
    inp = open(inp_path)
    lines = inp.readlines()
    # locate the ***material block
    for i, line in enumerate(lines):
        if line.lstrip().startswith('***material'):
            break
    euler_lines = []
    for j, line in enumerate(lines[i + 1:]):
        # read until next *** block
        if line.lstrip().startswith('***'):
            break
        if (not line.lstrip().startswith('%') and line.find('**elset') >= 0):
            euler_lines.append(line)
    euler = []
    for l in euler_lines:
        tokens = l.split()
        elset = tokens[tokens.index('**elset') + 1]
        irot = tokens.index('*rotation')
        if tokens[irot + 1] == 'x1':
            # orientation given by the rotation of two vectors x1 and x3
            x1 = np.empty(3, dtype=float)
            x1[0] = float(tokens[irot + 2])
            x1[1] = float(tokens[irot + 3])
            x1[2] = float(tokens[irot + 4])
            x3 = np.empty(3, dtype=float)
            x3[0] = float(tokens[irot + 6])
            x3[1] = float(tokens[irot + 7])
            x3[2] = float(tokens[irot + 8])
            euler.append([elset, Orientation.Zrot2OrientationMatrix(x1=x1, x3=x3)])
        else:
            # euler angles
            phi1 = tokens[irot + 1]
            Phi = tokens[irot + 2]
            phi2 = tokens[irot + 3]
            angles = np.array([float(phi1), float(Phi), float(phi2)])
            euler.append([elset, Orientation.from_euler(angles)])
    return dict(euler)
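A hedged end-to-end sketch of the parser above, added for illustration: the file name, the '***material' / '***return' framing and the two material lines are invented to match the token layout the code expects (a '**elset' name and a '*rotation' keyword carrying either three Euler angles or two vectors introduced by 'x1' and 'x3'); they are not lines taken from a real calculation file.

# Hypothetical input file; every line below is an assumption chosen to exercise
# both *rotation forms handled by read_euler_from_zset_inp.
with open('polycrystal.inp', 'w') as f:
    f.write('***material\n')
    f.write(' **elset grain_1 *file material.mat *rotation 10.5 42.0 281.3\n')
    f.write(' **elset grain_2 *file material.mat *rotation x1 0.43 -1.02 0.19 x3 1.03 0.89 1.00\n')
    f.write('***return\n')
orientation_by_elset = read_euler_from_zset_inp('polycrystal.inp')
for elset_name, value in orientation_by_elset.items():
    print(elset_name, value)   # one entry per **elset line found in the ***material block

Keying the result by elset name makes it straightforward to map each orientation back onto the corresponding element set of the mesh it describes.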
[ "def read_orientation_file(path):\n rotationdf = pd.read_csv(\n path,\n sep=' ',\n index_col=0,\n names=['strip', 'direction'],\n header=None\n )\n rotationdf['direction'] = rotationdf['direction'].astype(int)\n return rotationdf", "def orientations(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"orientations\")", "def load_z_scans(files, z_zero = 3.41e6, instrument = 'ashem'):\n \n # Import the meas structure\n meas = scipy.io.loadmat(files[0])['meas']\n \n # Get the z positions (the same for all the data files\n # A-SHeM has positive z towards the pinhole plate\n # B-SHeM has positive z away from the pinhole plate\n if instrument == 'ashem':\n zs = (z_zero - meas['z_positions'][0][0]) \n elif instrument == 'bshem': \n zs = (meas['z_positions'][0][0] - z_zero) \n else: \n raise('Unkown instrument input in load_z_scans') \n for j, item in enumerate(meas['inputs'][0][0][0]):\n if isinstance(item[0], str):\n if item[0] == 'example_pos':\n example_pos = meas['inputs'][0][0][0][j+1][0]\n break\n\n return(zs, meas, example_pos)", "def orientation(self):\r\n tag=self.readinfo('Image Orientation Patient')\r\n \r\n if tag==None:\r\n name=None\r\n elif tag==[-0,1,0,-0,-0,-1]:\r\n name=1 #Sagittal\r\n elif tag==[-1,-0,0,-0,-1,0]:\r\n name=2 #Axial\r\n elif tag==[1,0,0,0,0,-1]:\r\n name=3 #Coronal\r\n else:\r\n name=4 #Oblique\r\n self.orient=name\r\n return", "def orientations(self) -> Sequence[str]:\n return pulumi.get(self, \"orientations\")", "def readOptics(direction,nSkip=1,fileLoc=''):\n if direction == 'x':\n file = open(fileLoc+'fort.24','r')\n elif direction == 'z':\n file = open(fileLoc+'fort.26','r')\n else :\n file = open(fileLoc+'fort.25','r')\n lines = file.readlines()\n file.close()\n f=[]\n ph = 0\n z0 = 0\n for j in range(0,len(lines),nSkip) :\n z, sigmax, sigmap, alpha, emittance_norm = [ float(lines[j].split()[i]) for i in [0,2,4,5,6] ]\n beta=(1+alpha*alpha)**0.5 *sigmax/sigmap\n ph = ph + (z-z0)/beta\n if direction == 'z':\n f.append( [beta, alpha, emittance_norm, ph, z] )\n else:\n f.append( [beta, alpha, emittance_norm, ph, z] )\n z0=z\n return np.array(f)", "def load_file_and_split_by_z(raw_file_name):\n\n data = np.genfromtxt(raw_file_name, comments=\"#\")\n\n # array of the lower redshift bin edges for each GDMF\n z_bins_arr = np.unique(data[:, -2])\n\n gdmf_arr = []\n for zlow in z_bins_arr:\n bdx = data[:, -2] == zlow\n gdmf_arr.append(data[bdx, :-2])\n\n return z_bins_arr, gdmf_arr", "def read_icesat_elev(is_files,RoI):\n\n import h5py\n\n ## Read Icesat data\n print(\"read Icesat data\")\n filelist = glob(is_files)\n filelist.sort()\n\n all_lats = []\n all_lons = []\n all_elev = []\n\n for f in filelist:\n #print(f)\n ds=h5py.File(f,'r')\n lons = np.array(ds['Data_40HZ/Geolocation/d_lon'])\n lats = np.array(ds['Data_40HZ/Geolocation/d_lat'])\n\n inside=geo.points_inside_polygon(lons,lats,RoI)\n if len(inside[inside==True]):\n lons = lons[inside==True]\n lats = lats[inside==True]\n elev = np.array(ds['Data_40HZ/Elevation_Surfaces/d_elev'][inside==True])\n all_lons.extend(lons)\n all_lats.extend(lats)\n all_elev.extend(elev)\n\n all_lons = np.array(all_lons)\n all_lats = np.array(all_lats)\n all_elev = np.array(all_elev)\n \n return all_lons, all_lats, all_elev", "def __readGrain(self, offset):\n sectorOffset = StreamVmdkMedia.__byteOffsetToSectorOffset(offset) #translate the offset in bytes to an offset in sectors\n grainOffset = StreamVmdkMedia.__sectorOffsetToGrainOffset(sectorOffset)\n \n if grainOffset == len(self.__fullGT):\n return 
self.__incompleteWrittenGrain + StreamVmdkMedia.__padToGrain(self.__incompleteWrittenGrain)\n fileLocation = self.__fullGT[ grainOffset ] * SECTOR_SIZE#get the location in the file where we can find the grain\n \n if fileLocation:\n self.__file.seek( fileLocation + UINT64_BYTE_SIZE)#set the file position to point to the data-length byte of the marker\n compressedLength = struct.unpack(\"=I\", self.__file.read(UINT32_BYTE_SIZE))[0]#extract the required number of bytes\n compressedData = self.__file.read( compressedLength )#read the compressed data\n uncompressedData = zlib.decompress(compressedData)\n if len(uncompressedData) != GRAIN_SIZE:\n logging.critical(\"len(Uncompressed grain) != GRAIN_SIZE\")\n raise VMDKStreamException(\"invalid/corrupted input file! (incorrect grain size)\")\n return uncompressedData#and since we still need to read at least a whole grain we can add all uncompressed data\n else:#null block: add one whole grain of nulls\n return StreamVmdkMedia.__zeroGrain", "def read_tecplot_file_and_correct_for_location_rotation(\n tecplot_file,\n serration_angle = 0,\n angle_correction = 0,\n height_correction = 0,\n streamwise_correction = 0,\n time_step = 0,\n airfoil_normal = False,\n):\n import pandas as pd \n from re import findall\n from copy import copy\n\n # Get available variables\n f = open(tecplot_file,'ro')\n\n # Read the header and get variable and title information from it ###########\n var_flag = False\n dev_flag = False\n for line in f:\n string = findall(\"^VARIABLES[ _A-Za-z0-9,\\\"=]+\",line)\n if string:\n variables = [v.replace(' ','_').replace(\"\\\"\",\"\") \\\n for v in string[0].replace(\"VARIABLES = \",'')\\\n .split(\", \")]\n variables = [v for v in variables if len(v)]\n var_flag = True\n string = findall(\"^TITLE = [ -_A-Za-z0-9,\\\"=]+\",line)\n if string:\n dev_flag = True\n if var_flag and dev_flag:\n break\n f.close()\n ############################################################################\n\n # Put the data into a data frame ###########################################\n df = pd.read_table(\n tecplot_file,\n skiprows = 4,\n names = variables,\n sep = '[ \\t]+',\n index_col = False\n )\n ############################################################################\n df = df.drop('z',1)\n df = rename_df_columns_from_DaVis_to_standard(df)\n\n # Put the coordinate system in the standar direction, not vertical #########\n x = copy(df.x.values)\n y = copy(df.y.values)\n u = copy(df.u.values)\n v = copy(df.v.values)\n df.x = y\n df.y = -x\n df.u = v\n df.v = -u\n ############################################################################\n\n #len_x, len_y = len(df.x.unique()), len(df.y.unique())\n\n rotation_angle = serration_angle + angle_correction\n\n if airfoil_normal:\n rotation_angle = rotation_angle - 11.4\n\n df = correct_flow_plane_df(\n df,\n rotation_angle = rotation_angle,\n height_correction = height_correction,\n streamwise_correction = streamwise_correction,\n )\n\n df = regrid_df(\n df, \n #resolution = ( len_x , len_y )\n resolution = [0.5]\n )\n\n df[ 'time_step' ] = time_step\n\n return df", "def read_geometry_file(path_to_file):\n logger.info(\"Reading geometry file.\")\n with open(path_to_file) as f:\n lines = f.readlines()\n\n vec_x = lines[3].split()\n vec_y = lines[4].split()\n vec_z = lines[5].split()\n\n vec_x = [float(vec_x[i]) for i in range(1, len(vec_x))]\n vec_y = [float(vec_y[i]) for i in range(1, len(vec_y))]\n vec_z = [float(vec_z[i]) for i in range(1, len(vec_z))]\n\n vectors = [vec_x, vec_y, vec_z]\n uc_atoms = 
[]\n for i in range(6, len(lines)):\n sl = lines[i].split()\n x = float(sl[1])\n y = float(sl[2])\n z = float(sl[3])\n t = sl[4]\n\n if sl[4] == \"Ga\":\n c = ga_mass\n elif sl[4] == \"Al\":\n c = al_mass\n elif sl[4] == \"In\":\n c = in_mass\n elif sl[4] == \"O\":\n c = o_mass\n\n global_atom_types[t] = global_atom_types[t] + 1\n\n a = Atom(x, y, z, t, c)\n uc_atoms.append(a)\n logger.info(\"Geomtery file read.\")\n # uc_atoms = UCAtoms(uc_atoms)\n\n return vectors, uc_atoms", "def readGeometry(self):\r\n \r\n lineNum = 0\r\n with open(self.fileName, 'r') as fIn:\r\n # read the geometry file\r\n try:\r\n for line in fIn:\r\n # loop through each line in the file\r\n \r\n # inc the line number\r\n lineNum = lineNum + 1\r\n # parse the line in geometry file, adding info to geometryInfo\r\n self._parseHocGeometryLine(line)\r\n \r\n except IOError as err:\r\n sys.tracebacklimit = 0\r\n raise IOError('Error reading %s line %d: %s' % \\\r\n (self.fileName, lineNum, err.message))\r\n \r\n if self._openFilament:\r\n raise IOError('Error reading %s, filament %s open at end of file' %\r\n (self.fileName, self._openFilament))\r\n \r\n # connect filaments and remove filaments and _connections, leaving segments\r\n # and nodes\r\n self._connectFilaments()\r\n \r\n \r\n # make compartments from hemispheres remaining at the end of unconnected\r\n # segments\r\n #self._addOneNodeCompartments()\r", "def convert_zsa_to_xyz(file):\n with open(file, 'r') as f:\n data = f.readlines()\n\n for i, j in enumerate(data):\n if 'color red' in j:\n red_mention = i\n\n greens = data[1:red_mention]\n reds = data[red_mention+1:]\n\n n_atoms = len(greens) + len(reds)\n xyz_file = file.replace('.zsa', '_z.xyz')\n\n with open(xyz_file, 'w') as f:\n f.write(f'{n_atoms}\\nWritten by Andrew Tarzia!\\n')\n for g in greens:\n id = 'H'\n D = g.rstrip().replace('{', '').replace('}', '')\n x, y, z = [\n i for i in D.replace('point', '').split(' ') if i\n ]\n f.write(f'{id} {x} {y} {z}\\n')\n for g in reds:\n id = 'P'\n D = g.rstrip().replace('{', '').replace('}', '')\n x, y, z = [\n i for i in D.replace('point', '').split(' ') if i\n ]\n f.write(f'{id} {x} {y} {z}\\n')", "def read_isochrones(Ages, Zs, max_gmag = -3.5):\r\n\r\n print('Reading isochrone(s).')\r\n\r\n Z_models_list = np.array([0.00015, 0.00024, 0.00038, 0.00061, 0.00096, 0.00152, 0.00241, 0.00382, 0.00605, 0.0096, 0.0152, 0.0241])\r\n\r\n try:\r\n Z_models_lim = [min(Z_models_list, key=lambda x:abs(x-min(Zs))), min(Z_models_list, key=lambda x:abs(x-max(Zs)))]\r\n Z_models = Z_models_list[(Z_models_list >= Z_models_lim[0]) & (Z_models_list <= Z_models_lim[1])]\r\n except:\r\n Z_models = [min(Z_models_list, key=lambda x:abs(x-Zs))]\r\n\r\n isochrones = []\r\n for Z_model in Z_models:\r\n print('Readding Z:', Z_model)\r\n try:\r\n isochrone = np.loadtxt('./Auxiliary/PARSEC_Tracks/%s.dat'%Z_model, usecols = [7, 1, 3, 23, 24, 25])\r\n except:\r\n print(\"Could not find isochrones in ./Auxiliary/PARSEC_Tracks/\")\r\n sys.exit(1)\r\n \r\n for Age_model in Ages:\r\n Age_models = (np.abs(isochrone[:,1] - Age_model*1e9) == np.amin(np.abs(isochrone[:,1] - Age_model*1e9)))\r\n Max_mag = isochrone[:,3] <= max_gmag\r\n try:\r\n isochrone_age_maxg = isochrone[Age_models & Max_mag] \r\n print('Readding Age:', isochrone_age_maxg[0, 1]*1e-9)\r\n for label in set(isochrone_age_maxg[:, 0]):\r\n evolutionary_state = isochrone_age_maxg[:, 0] == label\r\n isochrones.append(pd.DataFrame(data = {'evolutionary_state': isochrone_age_maxg[evolutionary_state, 0], 'Mass': 
isochrone_age_maxg[evolutionary_state, 2], 'gmag_0': isochrone_age_maxg[evolutionary_state, 3], 'bpmag_0': isochrone_age_maxg[evolutionary_state, 4], 'rpmag_0': isochrone_age_maxg[evolutionary_state, 5]}))\r\n except:\r\n pass\r\n\r\n return isochrones", "def load_grain(grains, k):\n grain = -np.ones(dims)\n ind = grains[k][0]-1\n [x, y, z] = np.unravel_index(ind, dims, order='F')\n val = grains[k][1]\n grain[y,x,z] = val\n verts, faces = measure.marching_cubes_classic(grain, 0, spacing=(1, 1, 1))\n return verts, faces", "def cubeslice(cubefile='orion_13co.combine.fits', ralim=['5h37m30s', '5h34m30s'], declim=['-6d43m00s', '-5d54m00s'], \nvlim=[0*u.km/u.s, 20.*u.km/u.s], ra_axis=2, dec_axis=1, v_axis=0):\n f = pyfits.open(cubefile)\n head = f[0].header\n #first dimension of f[0].data represents different polarizations, this data\n #only has 1.\n data = f[0].data[0] \n f.close()\n\n rastep, decstep, vstep = head['cdelt1']*u.deg, head['cdelt2']*u.deg, head['cdelt3']*u.m/u.s\n\n rarefpix, decrefpix, vrefpix = head['crpix1'], head['crpix2'], head['crpix3']\n\n raref, decref, vref = head['crval1']*u.deg, head['crval2']*u.deg, head['crval3']*u.m/u.s\n\n ran, decn, vn = head['naxis1'], head['naxis2'], head['naxis3']\n\n ra = np.linspace(raref.value - rastep.value*(rarefpix-1),\n raref.value + rastep.value*(ran-rarefpix), num=ran)*u.deg\n dec = np.linspace(decref.value - decstep.value*(decrefpix-1),\n decref.value + decstep.value*(decn-decrefpix), num=decn)*u.deg\n v = np.linspace(vref.value - vstep.value*(vrefpix-1),\n vref.value + vstep.value*(vn-vrefpix), num=vn)*u.m/u.s\n \n #find indices that correspond to the requested ra,dec,v ranges.\n clo = skycoord(ra=ralim[0], dec=declim[0])\n chi = skycoord(ra=ralim[1], dec=declim[1])\n\n iira = np.where((ra < clo.ra) & (ra > chi.ra))[0] \n iidec = np.where((dec > clo.dec) & (dec < chi.dec))[0]\n iiv = np.where((v >= vlim[0]) & (v <= vlim[1]))[0]\n print iira[0], iidec, iiv\n print type(iira) \n return data[iiv[0]:iiv[-1],iidec[0]:iidec[-1],iira[0]:iira[-1]]", "def readOptics(direction,sample_period=1,fileLoc=''):\n if direction == 'x':\n file = open(fileLoc+'fort.24','r')\n f=data.dictClass({'s':[],'betx':[],'alfx':[],'emitx':[],'phx':[]})\n elif direction == 'y':\n file = open(fileLoc+'fort.25','r')\n f=data.dictClass({'s':[],'bety':[],'alfy':[],'emity':[],'phy':[]})\n elif direction == 'z':\n file = open(fileLoc+'fort.26','r')\n f=data.dictClass({'s':[],'betz':[],'alfz':[],'emitz':[],'phz':[]})\n lines = file.readlines()\n file.close()\n for i in range(len(lines)):\n lines[i] = [ float(lines[i].split()[j]) for j in [0,2,4,5,6] ]\n ph = 0\n s0 = 0\n if direction == 'x':\n j=sample_period-1\n for i in range(len(lines)):\n s, sigmax, sigmap, alpha, emittance_norm = lines[i]\n beta = (1+alpha*alpha)**0.5 *sigmax/sigmap\n ph = ph + (s-s0)/beta\n s0 = s\n j+=1\n if j==sample_period:\n j=0\n f.s.append(s)\n f.betx.append(beta)\n f.alfx.append(alpha)\n f.emitx.append(emittance_norm)\n f.phx.append(ph)\n elif direction == 'y':\n j=sample_period-1\n for i in range(len(lines)):\n s, sigmax, sigmap, alpha, emittance_norm = lines[i]\n beta = (1+alpha*alpha)**0.5 *sigmax/sigmap\n ph = ph + (s-s0)/beta\n s0 = s\n j+=1\n if j==sample_period:\n j=0\n f.s.append(s)\n f.bety.append(beta)\n f.alfy.append(alpha)\n f.emity.append(emittance_norm)\n f.phy.append(ph)\n elif direction == 'z':\n j=sample_period-1\n for i in range(len(lines)):\n s, sigmax, sigmap, alpha, emittance_norm = lines[i]\n beta = (1+alpha*alpha)**0.5 *sigmax/sigmap*1.0e-6\n ph = ph + (s-s0)/beta\n s0 
= s\n j+=1\n if j==sample_period:\n j=0\n f.s.append(s)\n f.betz.append(beta)\n f.alfz.append(alpha)\n f.emitz.append(emittance_norm*1.0e6)\n f.phz.append(ph)\n return f", "def readRMS(direction, sample_period=1,fileLoc=''):\n if direction == 'x':\n file = open(fileLoc+'fort.24','r')\n lines = file.readlines()\n file.close()\n f=data.dictClass({'s':[],'centroid_x':[],'rms_x':[],\n 'centroid_px':[],'rms_px':[],\n 'alfx':[],'emitx':[]})\n for j in range(0,len(lines),sample_period) :\n lines[j] = lines[j].split()\n f.s.append(float(lines[j][0]))\n f.centroid_x.append(float(lines[j][1]))\n f.rms_x.append(float(lines[j][2]))\n f.centroid_px.append(float(lines[j][3]))\n f.rms_px.append(float(lines[j][4]))\n f.alfx.append(float(lines[j][5]))\n f.emitx.append(float(lines[j][6]))\n \n elif direction == 'y':\n file = open(fileLoc+'fort.25','r')\n lines = file.readlines()\n file.close()\n f=data.dictClass({'s':[],'centroid_y':[],'rms_y':[],\n 'centroid_py':[],'rms_py':[],\n 'alfy':[],'emity':[]})\n for j in range(0,len(lines),sample_period) :\n lines[j] = lines[j].split()\n f.s.append(float(lines[j][0]))\n f.centroid_y.append(float(lines[j][1]))\n f.rms_y.append(float(lines[j][2]))\n f.centroid_py.append(float(lines[j][3]))\n f.rms_py.append(float(lines[j][4]))\n f.alfy.append(float(lines[j][5]))\n f.emity.append(float(lines[j][6]))\n \n elif direction == 's':\n file = open(fileLoc+'fort.26','r')\n lines = file.readlines()\n file.close()\n f=data.dictClass({'s':[],'centroid_z':[],'rms_z':[],\n 'centroid_pz':[],'rms_pz':[],\n 'alfz':[],'emitz':[]})\n for j in range(0,len(lines),sample_period) :\n lines[j] = lines[j].split()\n f.s.append(float(lines[j][0]))\n f.centroid_z.append(float(lines[j][1]))\n f.rms_z.append(float(lines[j][2]))\n f.centroid_pz.append(float(lines[j][3]))\n f.rms_pz.append(float(lines[j][4]))\n f.alfz.append(float(lines[j][5]))\n f.emitz.append(float(lines[j][6])*1.0e6) # degree-MeV\n for k in f.keys():\n f[k] = np.array(f[k])\n return f", "def load_position_file(self, file_name, fourier_flag = 0, conv_to_rads = 0, indices = None):\r\n values = []\r\n try:\r\n f = open(file_name, \"r\")\r\n index = 0\r\n last_time = 0\r\n last_px = 0\r\n last_py = 0\r\n last_pz = 0\r\n last_vx = 0\r\n last_vy = 0\r\n last_vz = 0\r\n last_roll = 0\r\n last_pitch = 0\r\n last_yaw = 0\r\n for line in f:\r\n if(index == 0):\r\n index += 1\r\n continue\r\n line_val = line.split()\r\n if(not self.isfloat(line_val[0])):\r\n continue\r\n #print(line_val)\r\n wp = float(line_val[0])\r\n time = float(line_val[1])\r\n px = float(line_val[2])\r\n py = float(line_val[3])\r\n pz = float(line_val[4])\r\n roll = float(line_val[5])\r\n pitch = float(line_val[6])\r\n yaw = float(line_val[7])\r\n if(conv_to_rads == 1):\r\n roll = roll/180 * np.pi\r\n pitch = pitch/180 * np.pi\r\n yaw = yaw/180 * np.pi\r\n if(last_time == 0 or (time-last_time) == 0): \r\n values.append(np.array([wp, time\r\n , px, py, pz\r\n , roll, pitch, yaw\r\n , 0, 0, 0\r\n , 0, 0, 0\r\n , 0, 0, 0]))\r\n \r\n else: \r\n #print(\"here\")\r\n vx = (px - last_px)/(time - last_time)\r\n vy = (py - last_py)/(time - last_time)\r\n vz = (pz - last_pz)/(time - last_time)\r\n ax = (vx - last_vx)/(time - last_time)\r\n ay = (vy - last_vy)/(time - last_time)\r\n az = (vz - last_vz)/(time - last_time)\r\n r_dot = (roll - last_roll)/(time - last_time)\r\n p_dot = (pitch - last_pitch)/(time - last_time)\r\n y_dot = (yaw - last_yaw)/(time - last_time)\r\n values.append(np.array([wp, time\r\n , px, py, pz\r\n , roll, pitch, yaw\r\n , vx, vy, vz\r\n , r_dot, 
p_dot, y_dot\r\n , ax, ay, az])) \r\n last_vx = vx\r\n last_vy = vy\r\n last_vz = vz\r\n last_time = time\r\n last_px = px\r\n last_py = py\r\n last_pz = pz\r\n last_roll = roll\r\n last_pitch = pitch\r\n last_yaw = yaw\r\n index += 1\r\n if(indices is None):\r\n start_index = 0\r\n end_index = len(values)\r\n else:\r\n start_index = indices[0]\r\n end_index = indices[1]\r\n values = np.array(values).T[:,start_index:end_index]\r\n # Get the final movement index. Assume for that the following:\r\n # The position is constant after that index\r\n # This means that change in position is minimal\r\n # Furthermore, change of change is also minimal\r\n # This obviously doesn't work if the platform stands still for \r\n # a while. But it does the job if a vibration is applied to the\r\n # system, to cut off non relevant parts for the fourier transform\r\n # Is probably too fickle. Use manual trimming instead.\r\n# values_rel = values / np.max(np.abs(values),1).reshape(values.shape[0],1)\r\n# d_new_time, d_values = self.get_time_derivative(values[0,:], values)\r\n# d_values_rel = d_values / np.max(np.abs(d_values), 1).reshape((d_values.shape[0],1))\r\n# d_time_matched, d_values_rel_matched = self.interpolate_to_array(values[0,:], d_new_time, d_values_rel)\r\n# dd_new_time, dd_values = self.get_time_derivative(d_time_matched, d_values_rel_matched)\r\n# dd_values_rel = dd_values / np.max(np.abs(dd_values), 1).reshape(dd_values.shape[0],1)\r\n# dd_time_matched, dd_values_rel_matched = self.interpolate_to_array(values[0,:], dd_new_time, d_values_rel)\r\n# end_indices = np.argmin(np.abs(dd_values_rel_matched)\r\n# + np.abs(d_values_rel_matched)\r\n# + np.abs(values_rel)\r\n# - np.abs(values_rel[:,-1]).reshape((values.shape[0],1)), 1)\r\n \r\n# print(end_indices)\r\n# end_index = np.max(end_indices)\r\n# print(end_index)\r\n finally:\r\n f.close()\r\n if(fourier_flag == 1):\r\n# val_for_fourier = values[:,0:end_index]\r\n y_k, x_hz, y_k_abs, y_k_phase = self.calculate_fourier_transforms(values.T).T\r\n return values, x_hz, np.array(y_k), y_k_abs, y_k_phase\r\n else:\r\n return values", "def loadsir(filename):\n\n fid = open(filename)\n data_types = dtype(\"int16\").newbyteorder(\">\")\n data_typec = dtype(\"int8\").newbyteorder(\">\")\n data_typef = dtype(\"float32\").newbyteorder(\">\")\n\n # read header\n head = double(fromfile(fid, dtype=data_types, count=256, sep=\"\"))\n\n nhtype = head[4]\n if nhtype < 20:\n nhtype = 1.0\n head[4] = 1.0\n\n nhead = head[40]\n if nhtype == 1:\n nhead = 1.0\n head[40] = 1.0\n head[41] = 0.0\n head[42] = 0.0\n head[43] = 0.0\n\n ndes = head[41]\n ldes = head[42]\n nia = head[43]\n idatatype = head[47]\n iopt = head[16] # transformation option\n\n if nhtype < 30: # old header format\n # set version 3.0 parameters to header version 2.0 defaults\n if iopt == -1: # image only\n ideg_sc = 10.0\n iscale_sc = 1000.0\n i0_sc = 100.0\n ixdeg_off = 0.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n elif iopt == 0: # rectalinear lat/lon\n ideg_sc = 100.0\n iscale_sc = 1000.0\n i0_sc = 100.0\n ixdeg_off = -100.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n elif (iopt == 1) or (iopt == 2): # lambert\n ideg_sc = 100.0\n iscale_sc = 1000.0\n i0_sc = 1.0\n ixdeg_off = 0.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n elif iopt == 5: # polar stereographic\n ideg_sc = 100.0\n iscale_sc = 100.0\n i0_sc = 1.0\n ixdeg_off = -100.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n elif (iopt == 8) or (iopt == 9) or (iopt == 10): # EASE2 grid\n ideg_sc = 10.0\n iscale_sc 
= 1000.0\n i0_sc = 1.0\n ixdeg_off = 0.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n elif (iopt == 11) or (iopt == 12) or (iopt == 13): # EASE grid\n ideg_sc = 10.0\n iscale_sc = 1000.0\n i0_sc = 10.0\n ixdeg_off = 0.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n else: # unknown default scaling\n ideg_sc = 100.0\n iscale_sc = 1000.0\n i0_sc = 100.0\n ixdeg_off = 0.0\n iydeg_off = 0.0\n ia0_off = 0.0\n ib0_off = 0.0\n\n head[39] = iscale_sc\n head[126] = ixdeg_off\n head[127] = iydeg_off\n head[168] = ideg_sc\n head[189] = ia0_off\n head[240] = ib0_off\n head[255] = i0_sc\n else: # get projection parameters offset and scale factors\n iscale_sc = head[39]\n ixdeg_off = head[126]\n iydeg_off = head[127]\n ideg_sc = head[168]\n ia0_off = head[189]\n ib0_off = head[240]\n i0_sc = head[255]\n\n # decode projection transformation\n xdeg = head[2] / ideg_sc - ixdeg_off\n ydeg = head[3] / ideg_sc - iydeg_off\n ascale = head[5] / iscale_sc\n bscale = head[6] / iscale_sc\n a0 = head[7] / i0_sc - ia0_off\n b0 = head[8] / i0_sc - ib0_off\n # get special cases which depend on transformation option\n if iopt == -1: # image only\n pass\n elif iopt == 0: # rectalinear lat/lon\n pass\n elif (iopt == 1) or (iopt == 2): # lambert\n ascale = iscale_sc / head[5]\n bscale = iscale_sc / head[6]\n elif iopt == 5: # polar stereographic\n pass\n elif (iopt == 8) or (iopt == 9) or (iopt == 10): # EASE2 grid\n pass\n elif (iopt == 11) or (iopt == 12) or (iopt == 13): # EASE grid\n ascale = 2.0 * (head[5] / iscale_sc) * 6371.228 / 25.067525\n bscale = 2.0 * (head[6] / iscale_sc) * 25.067525\n else: # unknown default scaling\n print(\"*** Unrecognized SIR option in loadsir ***\")\n\n head[2] = xdeg\n head[3] = ydeg\n head[5] = ascale\n head[6] = bscale\n head[7] = a0\n head[8] = b0\n\n if head[10] == 0: # iscale\n head[10] = 1.0\n\n s = 1.0 / head[10]\n soff = 32767.0 / head[10]\n if idatatype == 1:\n soff = 128.0 / head[10]\n\n ioff = head[9]\n anodata = head[48] * s + ioff + soff\n vmin = head[49] * s + ioff + soff\n vmax = head[50] * s + ioff + soff\n\n if idatatype == 4: # floating point file -- very rare\n # fid.close()\n fid2 = open(filename)\n fromfile(fid2, dtype=data_types, count=51, sep=\"\")\n fl = double(fromfile(fid2, dtype=data_typef, count=3, sep=\"\"))\n fid2.close()\n # fid = file(filename)\n # fromfile(fid,dtype=data_types,count=256,sep=\"\")\n anodata = fl[0]\n vmin = fl[1]\n vmax = fl[2]\n\n head[45] = head[45] * 0.1\n head[48] = anodata\n head[49] = vmin\n head[50] = vmax\n\n descrip = []\n iaopt = []\n\n if nhead > 1:\n if ndes > 0:\n descrip = double(fromfile(fid, dtype=data_typec, count=ndes * 512, sep=\"\"))\n descrip = transpose(descrip[1:ldes])\n m, n = descrip.shape\n for j in range(1, n / 2 + 1):\n k = (j - 1) * 2 + 1\n t = descrip[k - 1]\n descrip[k - 1] = descrip[k]\n descrip[k] = t\n if nia > 0:\n nia1 = 256.0 * ceil(nia / 256)\n iaopt = double(fromfile(fid, dtype=data_types, count=nia1, sep=\"\"))\n iaopt = transpose(iaopt[1:nia])\n # read image data\n\n if idatatype == 1: # very rare\n # disp(['Read byte data: ' num2str(head(1)) ' x ' num2str(head(2))]);\n im_in = double(\n fromfile(fid, dtype=data_typec, count=int(head[0] * head[1]), sep=\"\")\n ) # read byte image data\n image = flipud(\n reshape(s * im_in + soff + ioff, (head[1], head[0]), order=\"C\")\n ) # scale data to floating point and\n # change origin location\n elif idatatype == 4: # rare\n # disp(['Read float data: ' num2str(head(1)) ' x ' num2str(head(2))]);\n im_in = double(\n fromfile(fid, dtype=data_typef, 
count=int(head[0] * head[1]), sep=\"\")\n )\n image = flipud(\n reshape(im_in, (head[1], head[0]), order=\"C\")\n ) # read floating point data\n else: # most commonly used\n # disp(['Read integer data: ' num2str(head(1)) ' x ' num2str(head(2))]);\n im_in = double(\n fromfile(fid, dtype=data_types, count=int(head[0] * head[1]), sep=\"\")\n ) # read integer image data\n image = flipud(\n reshape(s * im_in + soff + ioff, (int(head[1]), int(head[0])), order=\"C\")\n ) # scale data to floating point and\n # change origin location for display\n\n if nhtype == 1: # if old-style header, set default values\n vmin = min(image.flatten(1))\n vmax = max(image.flatten(1))\n anodata = vmin\n head[48] = anodata\n head[49] = vmin\n head[50] = vmax\n if vmin == -32:\n head[18] = 1.0\n elif vmin == -3.2:\n head[18] = 2.0\n\n head[44] = 2.0\n head[45] = 53.0\n\n fid.close()\n return image, head, descrip, iaopt" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the Schmid factor for this crystal orientation and the given slip system.
def schmid_factor(self, slip_system, load_direction=[0., 0., 1]):
    plane = slip_system.get_slip_plane()
    gt = self.orientation_matrix().transpose()
    n_rot = np.dot(gt, plane.normal())  # plane.normal() is a unit vector
    slip = slip_system.get_slip_direction().direction()
    slip_rot = np.dot(gt, slip)
    SF = np.abs(np.dot(n_rot, load_direction) * np.dot(slip_rot, load_direction))
    return SF
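For reference, the quantity computed above is the classical Schmid factor m = |cos(phi) * cos(lambda)|, where phi is the angle between the load axis and the slip plane normal and lambda the angle between the load axis and the slip direction. The following is a minimal, self-contained numpy check of that formula; it deliberately avoids the SlipSystem/Orientation API used above and assumes a cube-oriented crystal (identity orientation matrix), an FCC (111)[-101] slip system and a [001] load axis, which gives m = 1/sqrt(3) * 1/sqrt(2), about 0.408.

import numpy as np

# cube orientation: the orientation matrix g is the identity, so gt = g.T = I
gt = np.eye(3).transpose()
normal = np.array([1., 1., 1.]) / np.sqrt(3.)   # (111) slip plane normal, unit vector
slip = np.array([-1., 0., 1.]) / np.sqrt(2.)    # [-101] slip direction, unit vector
load_direction = np.array([0., 0., 1.])         # uniaxial load along [001]

n_rot = np.dot(gt, normal)    # plane normal expressed in the sample frame
slip_rot = np.dot(gt, slip)   # slip direction expressed in the sample frame
SF = np.abs(np.dot(n_rot, load_direction) * np.dot(slip_rot, load_direction))
print(round(SF, 3))  # 0.408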
[ "def calculate_seismic_force(base_shear, floor_weight, floor_height, k):\r\n # Calculate the product of floor weight and floor height\r\n # Note that floor height includes ground floor, which will not be used in the actual calculation.\r\n # Ground floor is stored here for completeness.\r\n weight_floor_height = floor_weight * floor_height[1:, 0]**k\r\n # Equation 12.8-12 in ASCE 7-10\r\n Cvx = weight_floor_height/np.sum(weight_floor_height)\r\n # Calculate the seismic story force\r\n seismic_force = Cvx * base_shear\r\n # Calculate the shear force for each story: from top story to bottom story\r\n story_shear = np.zeros([len(floor_weight), 1])\r\n for story in range(len(floor_weight)-1, -1, -1):\r\n story_shear[story] = np.sum(seismic_force[story:])\r\n\r\n return seismic_force, story_shear", "def compute_seismic_force(self):\r\n # Please note that the period for computing the required strength should be bounded by CuTa\r\n period_for_strength = min(self.elf_parameters['modal period'], self.elf_parameters['period'])\r\n # The period used for computing story drift is not required to be bounded by CuTa\r\n if PERIOD_FOR_DRIFT_LIMIT:\r\n period_for_drift = min(self.elf_parameters['modal period'], self.elf_parameters['period'])\r\n else:\r\n period_for_drift = self.elf_parameters['modal period']\r\n # Call function defined in \"help_functions.py\" to determine the seismic response coefficient\r\n Cs_for_strength = calculate_Cs_coefficient(self.elf_parameters['SDS'], self.elf_parameters['SD1'],\r\n self.elf_parameters['S1'], period_for_strength,\r\n self.elf_parameters['TL'], self.elf_parameters['R'],\r\n self.elf_parameters['Ie'])\r\n Cs_for_drift = calculate_Cs_coefficient(self.elf_parameters['SDS'], self.elf_parameters['SD1'],\r\n self.elf_parameters['S1'], period_for_drift,\r\n self.elf_parameters['TL'], self.elf_parameters['R'],\r\n self.elf_parameters['Ie'])\r\n # Calculate the base shear\r\n base_shear_for_strength = Cs_for_strength * np.sum(self.gravity_loads['floor weight'])\r\n base_shear_for_drift = Cs_for_drift * np.sum(self.gravity_loads['floor weight'])\r\n # Call function defined in \"help_functions.py\" to compute k coefficient\r\n k = determine_k_coeficient(self.elf_parameters['period'])\r\n # Call function defined in \"help_functions.py\" to determine the lateral force for each floor level\r\n lateral_story_force_for_strength, story_shear_for_strength \\\r\n = calculate_seismic_force(base_shear_for_strength, self.gravity_loads['floor weight'], \\\r\n self.geometry['floor height'], k)\r\n lateral_story_force_for_drift, story_shear_for_drift \\\r\n = calculate_seismic_force(base_shear_for_drift, self.gravity_loads['floor weight'], \\\r\n self.geometry['floor height'], k)\r\n # Store information into class attribute\r\n self.seismic_force_for_strength = {'lateral story force': lateral_story_force_for_strength, \\\r\n 'story shear': story_shear_for_strength, \\\r\n 'base shear': base_shear_for_strength, 'Cs': Cs_for_strength}\r\n self.seismic_force_for_drift = {'lateral story force': lateral_story_force_for_drift, \\\r\n 'story shear': story_shear_for_drift, \\\r\n 'base shear': base_shear_for_drift, 'Cs': Cs_for_drift}", "def shear_Reuss(self):\r\n s = self.Sij\r\n return 15 / (4 * (s[0, 0] + s[1, 1] + s[2, 2]) - 4 * (s[0, 1] + s[1, 2] + s[0, 2]) + 3 * (s[3, 3] + s[4, 4] + s[5, 5]))", "def S(phi, _s, s, s_):\n return np.dot(_S(_s, s, s_), phi)", "def slip_correction_factor(self):\n return scf(\n radius=self.particle_radius,\n knu=self.knudsen_number(),\n )", "def 
getStoichiometricCoefficient(self, spec):\n cython.declare(stoich=cython.int, reactant=Species, product=Species)\n stoich = 0\n for reactant in self.reactants:\n if reactant is spec: stoich -= 1\n for product in self.products:\n if product is spec: stoich += 1\n return stoich", "def convolve(self, sfh):\n # As both the SFH and the SSP (limited to the age of the SFH) data now\n # share the same time grid, the convolution is just a matter of\n # reverting one and computing the sum of the one to one product; this\n # is done using the dot product.\n mass_table = self.mass_table[:, :sfh.size]\n spec_table = self.spec_table[:, :sfh.size]\n\n # The 1.e6 factor is because the SFH is in solar mass per year.\n masses = 1.e6 * np.dot(mass_table, sfh[::-1])\n spectra = 1.e6 * np.dot(spec_table, sfh[::-1])\n\n return masses, spectra", "def calc(self, sid):\n\n\t\tchar_collocates = collocates_manager.get(sid, tpe='character',\n\t\t\trole=self.role, ranks=self.ranks)\n\t\tnoun_collocates = collocates_manager.get(sid, tpe='noun')\n\n\t\t# TODO: Figure out which order ir better here.\n\t\td1 = Probability.gen_dist(noun_collocates, smooth=False)\n\t\td2 = Probability.gen_dist(char_collocates, smooth=False)\n\n\t\treturn Probability.kl_divergence(d1, d2)", "def _sq_mahalanobis(self, x, center, cov_matrix):\n x_center_diff = x - center\n return x_center_diff.dot(np.linalg.inv(cov_matrix)).dot(\n x_center_diff.T)", "def sch(self, peg):\n return self.xyz(peg.ellipsoid).sch(peg)", "def hc_Sf(f, Sf, d=1):\n\n f = f.to(u.Hz)\n Sf = Sf.to(1/u.Hz**3)\n hc = np.sqrt(12*np.pi**2)*np.sqrt(Sf*f**3)\n if d==-1: hc = Sf**2/12/np.pi**2/f**(3/2)\n\n return hc", "def calc(self, sid):\n\n\t\tcollocates = collocates_manager.get(sid, tpe='character',\n\t\t\trole=self.role, ranks=self.ranks)\n\n\t\tdist = Probability.gen_dist(collocates, smooth=False)\n\t\treturn Probability.kurtosis(dist)", "def sch(self, peg):\n if not np.iterable(self.x):\n xyzP = peg.rotation_matrix.T.dot(\n array([self.x,self.y,self.z])-peg.translation_vector)\n else:\n xyzP = peg.rotation_matrix.T.dot(\n array([self.x-peg.translation_vector[0],\n self.y-peg.translation_vector[1],\n self.z-peg.translation_vector[2]]))\n r = np.linalg.norm(xyzP, axis=0)\n h = r - peg.radius\n c = peg.radius * arcsin(xyzP[2] / r)\n s = peg.radius * arctan2(xyzP[1], xyzP[0])\n return SCH(peg, s, c, h)", "def shear(self):\r\n return (self.shear_Voigt + self.shear_Reuss) / 2", "def fermion_massSq(self, X):\n X = np.array(X)\n h, sigma = X[...,0], X[...,1]\n\n #Top quark\n mt_Sq_field = self.yt**2*h*h/2\n mt_Sq_vev = self.yt**2*self.v0**2/2 + 0*h\n \n MSq = np.array([mt_Sq_field])\n MSq_vev = np.array([mt_Sq_vev])\n MSq = np.rollaxis(MSq, 0, len(MSq.shape))\n MSq_vev = np.rollaxis(MSq_vev, 0, len(MSq_vev.shape))\n dof = np.array([12])\n return MSq, MSq_vev, dof", "def calc_ST(Sal):\n return 0.14 * Sal / 1.80655 / 96.062 # mol/kg-SW", "def gramschmidt2ssp(xgs):\n\tU = LieElement(np.pi/4) # Gram-Schmidt basis similarity matrix\n\tV = np.array([[1, 0, 0, 0],\n\t\t\t\t [0, 0, 1, 0],\n\t\t\t [0, 0, 0, 1]],float ) # 4->3 dim reduction matrix\n\t\n\t\n\tx = np.dot(U.transpose(),np.dot(V.transpose(),xgs))\n\treturn x", "def _get_shrink_target_single_factor(self, X: np.ndarray, S: np.ndarray) -> np.ndarray:\n X_mkt = np.nanmean(X, axis=1)\n cov_mkt = np.asarray(X.T.dot(X_mkt) / len(X))\n var_mkt = np.asarray(X_mkt.dot(X_mkt) / len(X))\n F = np.outer(cov_mkt, cov_mkt) / var_mkt\n np.fill_diagonal(F, np.diag(S))\n return F", "def sinh(x):\n\ttry:\n\t\tval = np.sinh(x.val)\n\t\tders = 
defaultdict(float)\n\t\tsec_ders = defaultdict(float)\n\t\tfor key in x.der:\n\t\t\tders[key] += np.cosh(x.val) * (x.der[key])\n\t\t\tsec_ders[key] += x.sec_der[key]*np.cosh(x.val) + x.der[key]**2*np.sinh(x.val)\n\t\treturn Variable(val, ders, sec_ders)\n\texcept AttributeError:\n\t\treturn np.sinh(x)", "def get_hfs_rates(w,orient_vecs,sphere_raduis):\n u_vec, f_vec, s_vec = orient_vecs\n fly_pos = sphere_raduis*u_vec\n vel_vec = numpy.cross(w,fly_pos)\n head_rate = numpy.dot(u_vec,w)\n forw_rate = numpy.dot(vel_vec, f_vec)\n side_rate = numpy.dot(vel_vec, s_vec)\n return head_rate, forw_rate, side_rate" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute all Schmid factors for this crystal orientation and the given list of slip systems.
def compute_all_schmid_factors(self, slip_systems, load_direction=[0., 0., 1], verbose=False):
    SF_list = []
    for ss in slip_systems:
        sf = self.schmid_factor(ss, load_direction)
        if verbose:
            print('Slip system: %s, Schmid factor is %.3f' % (ss, sf))
        SF_list.append(sf)
    return SF_list
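As a usage illustration of the loop above, here is a self-contained sketch (plain numpy, not the pymicro API) that builds the twelve FCC {111}<110> slip systems on the fly, computes the Schmid factor of each for a cube-oriented crystal loaded along [001], and reports the maximum. The plane and direction lists are hard-coded assumptions for the FCC case, not taken from a SlipSystem object.

import numpy as np
from itertools import product

load = np.array([0., 0., 1.])
gt = np.eye(3)  # cube orientation

# the four {111} plane normals and the six <110> directions
planes = [np.array(p, dtype=float) for p in [(1, 1, 1), (-1, 1, 1), (1, -1, 1), (1, 1, -1)]]
directions = [np.array(d, dtype=float) for d in
              [(1, 1, 0), (1, -1, 0), (1, 0, 1), (1, 0, -1), (0, 1, 1), (0, 1, -1)]]

schmid_factors = []
for n, d in product(planes, directions):
    if abs(np.dot(n, d)) > 1e-9:
        continue  # keep only directions lying in the slip plane: 4 planes x 3 directions = 12 systems
    n_hat = np.dot(gt, n / np.linalg.norm(n))
    d_hat = np.dot(gt, d / np.linalg.norm(d))
    schmid_factors.append(np.abs(np.dot(n_hat, load) * np.dot(d_hat, load)))

print(len(schmid_factors))            # 12
print(round(max(schmid_factors), 3))  # 0.408 for the cube orientation under [001]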
[ "def PSSM_freqs(PSSM_all, pseudocount):\n PSSM_all_psc = PSSM_pseudocount(PSSM_all, pseudocount)\n \n PSSM_all_f = []\n for PSSM in PSSM_all_psc:\n PSSM_colsums = np.sum(PSSM,0,dtype='float')\n PSSM_all_f.append(PSSM / PSSM_colsums)\n \n return(PSSM_all_f)", "def convolve(self, sfh):\n # As both the SFH and the SSP (limited to the age of the SFH) data now\n # share the same time grid, the convolution is just a matter of\n # reverting one and computing the sum of the one to one product; this\n # is done using the dot product.\n mass_table = self.mass_table[:, :sfh.size]\n spec_table = self.spec_table[:, :sfh.size]\n\n # The 1.e6 factor is because the SFH is in solar mass per year.\n masses = 1.e6 * np.dot(mass_table, sfh[::-1])\n spectra = 1.e6 * np.dot(spec_table, sfh[::-1])\n\n return masses, spectra", "def test_stinespring_transpose(self):\n mats = self.unitaries\n chans = [Stinespring(mat) for mat in mats]\n self._compare_transpose_to_operator(chans, mats)", "def get_hfs_rates(w,orient_vecs,sphere_raduis):\n u_vec, f_vec, s_vec = orient_vecs\n fly_pos = sphere_raduis*u_vec\n vel_vec = numpy.cross(w,fly_pos)\n head_rate = numpy.dot(u_vec,w)\n forw_rate = numpy.dot(vel_vec, f_vec)\n side_rate = numpy.dot(vel_vec, s_vec)\n return head_rate, forw_rate, side_rate", "def compute_seismic_force(self):\r\n # Please note that the period for computing the required strength should be bounded by CuTa\r\n period_for_strength = min(self.elf_parameters['modal period'], self.elf_parameters['period'])\r\n # The period used for computing story drift is not required to be bounded by CuTa\r\n if PERIOD_FOR_DRIFT_LIMIT:\r\n period_for_drift = min(self.elf_parameters['modal period'], self.elf_parameters['period'])\r\n else:\r\n period_for_drift = self.elf_parameters['modal period']\r\n # Call function defined in \"help_functions.py\" to determine the seismic response coefficient\r\n Cs_for_strength = calculate_Cs_coefficient(self.elf_parameters['SDS'], self.elf_parameters['SD1'],\r\n self.elf_parameters['S1'], period_for_strength,\r\n self.elf_parameters['TL'], self.elf_parameters['R'],\r\n self.elf_parameters['Ie'])\r\n Cs_for_drift = calculate_Cs_coefficient(self.elf_parameters['SDS'], self.elf_parameters['SD1'],\r\n self.elf_parameters['S1'], period_for_drift,\r\n self.elf_parameters['TL'], self.elf_parameters['R'],\r\n self.elf_parameters['Ie'])\r\n # Calculate the base shear\r\n base_shear_for_strength = Cs_for_strength * np.sum(self.gravity_loads['floor weight'])\r\n base_shear_for_drift = Cs_for_drift * np.sum(self.gravity_loads['floor weight'])\r\n # Call function defined in \"help_functions.py\" to compute k coefficient\r\n k = determine_k_coeficient(self.elf_parameters['period'])\r\n # Call function defined in \"help_functions.py\" to determine the lateral force for each floor level\r\n lateral_story_force_for_strength, story_shear_for_strength \\\r\n = calculate_seismic_force(base_shear_for_strength, self.gravity_loads['floor weight'], \\\r\n self.geometry['floor height'], k)\r\n lateral_story_force_for_drift, story_shear_for_drift \\\r\n = calculate_seismic_force(base_shear_for_drift, self.gravity_loads['floor weight'], \\\r\n self.geometry['floor height'], k)\r\n # Store information into class attribute\r\n self.seismic_force_for_strength = {'lateral story force': lateral_story_force_for_strength, \\\r\n 'story shear': story_shear_for_strength, \\\r\n 'base shear': base_shear_for_strength, 'Cs': Cs_for_strength}\r\n self.seismic_force_for_drift = {'lateral story force': 
lateral_story_force_for_drift, \\\r\n 'story shear': story_shear_for_drift, \\\r\n 'base shear': base_shear_for_drift, 'Cs': Cs_for_drift}", "def calculate_seismic_force(base_shear, floor_weight, floor_height, k):\r\n # Calculate the product of floor weight and floor height\r\n # Note that floor height includes ground floor, which will not be used in the actual calculation.\r\n # Ground floor is stored here for completeness.\r\n weight_floor_height = floor_weight * floor_height[1:, 0]**k\r\n # Equation 12.8-12 in ASCE 7-10\r\n Cvx = weight_floor_height/np.sum(weight_floor_height)\r\n # Calculate the seismic story force\r\n seismic_force = Cvx * base_shear\r\n # Calculate the shear force for each story: from top story to bottom story\r\n story_shear = np.zeros([len(floor_weight), 1])\r\n for story in range(len(floor_weight)-1, -1, -1):\r\n story_shear[story] = np.sum(seismic_force[story:])\r\n\r\n return seismic_force, story_shear", "def schmidt_coefficients(schmidt_modes):\n return np.array([mode[0] for mode in schmidt_modes])", "def flux_science(self):\n norder = self.sens_dict['norder']\n for iord in range(norder):\n sens_dict_iord = self.sens_dict[str(iord)]\n for sci_obj in self.sci_specobjs:\n if sci_obj.ech_orderindx == iord:\n flux.apply_sensfunc(sci_obj, sens_dict_iord, self.sci_header['AIRMASS'],\n self.sci_header['EXPTIME'], self.spectrograph)\n self.steps.append(inspect.stack()[0][3])", "def buildVectors(stds, path=None, strip=(), det = pt_det_all ):\n\tpath = (path if path else defaultVecPath)\n\tprocStds = {}\n\tstrip = [element(elm) for elm in strip]\n\tstd0 = None\n\tfor elm, std in stds.iteritems():\n\t\tif isinstance(std, str):\n\t\t\tstd = readSpectrum(\"%s/%s\" % (path, std))\n\t\telif isinstance(std, ScriptableSpectrum):\n\t\t\tstd = std.wrapped\n\t\tprocStds[element(elm)] = std\n\t\tif not std0:\n\t\t\tstd0 = wrap(std)\n\tsv = fq.SchamberVectors(det, epq.ToSI.keV(std0.beamEnergy()))\n\tfor elm, std in procStds.iteritems():\n\t\tsv.addStandard(elm, std, elm in strip)\n\treturn sv.getVectorSet()", "def S(phi, _s, s, s_):\n return np.dot(_S(_s, s, s_), phi)", "def SFWHT(X):\r\n # if you dont know, take the input x = [1:8] and you would know \r\n # when m = 3 , this fiunction would do three matrix multiplication task \r\n # x=get_sequency_list(X)\r\n x= np.array(X)\r\n #print('get_sequency_list',x)\r\n M = int(log(x.size,2))\r\n x = x[:(2**M)]\r\n #print('x size',x.size)\r\n\r\n N = x.size\r\n out = x.copy()\r\n for m in range(M):\r\n #print('m',m)\r\n outtemp = out.copy() \r\n step = 2**m\r\n #print('step',step)\r\n numCalc = 2**m\r\n #print('numcalc',numCalc)\r\n for g in range(0,N,2*step): # number of groups\r\n #print('g',g)\r\n\r\n for c in range(numCalc):\r\n #print('c',c)\r\n index = g + c\r\n out[index] = outtemp[index] + outtemp[index+step]\r\n out[index+step] = outtemp[index] - outtemp[index+step]\r\n # print('out',out)\r\n #print ('result:',out/float(N))\r\n return out/float(N)", "def woods_mixing_from_sets(fluidset, satset):\r\n\r\n K_fluid = 0.0\r\n R_fluid = 0.0\r\n\r\n for key in fluidset.keys():\r\n K_fluid = K_fluid + satset[key] / fluidset[key]['K']\r\n R_fluid = R_fluid + satset[key] * fluidset[key]['R']\r\n\r\n K_fluid = 1.0/K_fluid\r\n\r\n return K_fluid, R_fluid", "def schur_vectors(self) -> Optional[np.ndarray]:\n return self._schur_vectors", "def prime_fluidics_system(self):\n\n\t\tself.logging.info(\"%s\\t%i\\t--> Prime fluidics system: [%s]\" % (self.cycle_name, self.flowcell, self.state))\n\t\tself.prime_flowcells() #RCT prime both flowcells with 
\"Wash\"\n\t\tself.prime_reagent_block() #RCT prime reagent block chambers with \"Wash\" \n\t\tself.prime_flowcells() #RCT prime both flowcells with \"Wash\"", "def _fermion_solver(self):\n self.kcut = len(self.offsets) - 1\n\n nhe, he2idx, idx2he = _heom_state_dictionaries(\n [2] * len(self.flat_ck), self.N_cut\n )\n self.nhe = nhe\n self.he2idx = he2idx\n self.idx2he = idx2he\n \n\n # Separate cases for Hamiltonian and Liouvillian\n if self.isHamiltonian:\n if self.isTimeDep:\n self.N = self.H_sys_list.shape[0]\n self.L = liouvillian(self.H_sys_list[0], []).data\n \n else:\n self.N = self.H_sys.shape[0]\n self.L = liouvillian(self.H_sys, []).data\n \n else:\n \n if self.isTimeDep: \n self.N = int(np.sqrt(self.H_sys_list[0].shape[0])) \n self.L = self.H_sys_list[0].data\n \n else:\n self.N = int(np.sqrt(self.H_sys.shape[0]))\n self.L = self.H_sys.data\n \n self.L_helems = sp.csr_matrix(\n (self.nhe * self.N ** 2, self.nhe * self.N ** 2), dtype=np.complex\n )\n # Set coupling operators\n spreQ = []\n spostQ = []\n spreQdag = []\n spostQdag = []\n for coupOp in self.coup_op:\n spreQ.append(spre(coupOp).data)\n spostQ.append(spost(coupOp).data)\n spreQdag.append(spre(coupOp.dag()).data)\n spostQdag.append(spost(coupOp.dag()).data)\n\n self.spreQ = spreQ\n self.spostQ = spostQ\n self.spreQdag = spreQdag\n self.spostQdag = spostQdag\n # make right hand side\n self.fermion_rhs()\n\n # return output\n return self.L_helems, self.nhe", "def sosfreqz(sos, worN=512, whole=False, fs=2*pi):\n\n sos, n_sections = _validate_sos(sos)\n if n_sections == 0:\n raise ValueError('Cannot compute frequencies with no sections')\n h = 1.\n for row in sos:\n w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole, fs=fs)\n h *= rowh\n return w, h", "def calculate_hemispheres(self):\n\n # Figure out hemisphere of each line\n self.fiber_hemisphere = numpy.zeros(self.number_of_fibers)\n # percentage in left hemisphere\n test = sum(self.fiber_array_r.T < 0) / float(self.points_per_fiber)\n self.fiber_hemisphere[numpy.nonzero(test > 0.95)] = -1\n self.fiber_hemisphere[numpy.nonzero(test < 0.05)] = 1\n # otherwise hem stays 0 for commissural\n\n # output boolean arrays for each hemisphere and callosal fibers\n self.is_left_hem = (self.fiber_hemisphere == -1)\n self.is_right_hem = (self.fiber_hemisphere == 1)\n self.is_commissure = (self.fiber_hemisphere == 0)\n\n # output indices of each type above\n self.index_left_hem = numpy.nonzero(self.is_left_hem)[0]\n self.index_right_hem = numpy.nonzero(self.is_right_hem)[0]\n self.index_commissure = numpy.nonzero(self.is_commissure)[0]\n self.index_hem = \\\n numpy.nonzero(self.is_left_hem | self.is_right_hem)[0]\n\n # output totals of each type also\n self.number_left_hem = len(self.index_left_hem)\n self.number_right_hem = len(self.index_right_hem)\n self.number_commissure = len(self.index_commissure)\n\n # test\n if __debug__:\n test = self.number_of_fibers == \\\n (self.number_left_hem + self.number_right_hem \\\n + self.number_commissure)\n if not test:\n print \"<fibers.py> ERROR: fiber numbers don't add up.\"\n raise AssertionError", "def cal_Sfs(signal1, signal2, fmin, fmax):\n update()\n points = get_points([signal1.samples, signal2.samples])\n fs1 = FAS(signal1.velo, signal1.dt, points, fmin, fmax, 3)[-1]\n fs2 = FAS(signal2.velo, signal2.dt, points, fmin, fmax, 3)[-1]\n s = np.array([], float)\n\n for i in range(0, fs1.size):\n s = np.append(s, S(fs1[i], fs2[i]))\n\n # print s.size\n # print np.mean(s)\n Sfs = np.mean(s)\n return Sfs", "def 
test_stinespring_conjugate(self):\n mats = self.unitaries\n chans = [Stinespring(mat) for mat in mats]\n self._compare_conjugate_to_operator(chans, mats)", "def apply_symplectic(self, S, qubits):\n # Approach 1: convert the 2m x 2m symplectic matrix S to a 2n x 2n\n # matrix that acts on the corresponding columns in qubits\n # M = decompose.symplectic_to_matrix(S, self.n, qubits)\n # self.state = (self.state @ M) % 2\n\n # Approach 2: decompose the 2m x 2m symplectic matrix into a\n # series of {C, H, P} gates, then apply those\n # NOTE: this is actually much faster in practice for large n\n m = len(qubits)\n gates = decompose.decompose_state(CHP_Simulation(m, S))\n gates = decompose.change_gates(gates, qubits)\n decompose.apply_gates(gates, self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the Schmid factor of this grain for the given slip system.
def schmid_factor(self, slip_system, load_direction=[0., 0., 1]):
    # the slip system geometry is handled by the Orientation class, so the grain
    # simply delegates the calculation to its orientation attribute
    return self.orientation.schmid_factor(slip_system, load_direction)
[ "def shear_Reuss(self):\r\n s = self.Sij\r\n return 15 / (4 * (s[0, 0] + s[1, 1] + s[2, 2]) - 4 * (s[0, 1] + s[1, 2] + s[0, 2]) + 3 * (s[3, 3] + s[4, 4] + s[5, 5]))", "def shear(self):\r\n return (self.shear_Voigt + self.shear_Reuss) / 2", "def calculate_seismic_force(base_shear, floor_weight, floor_height, k):\r\n # Calculate the product of floor weight and floor height\r\n # Note that floor height includes ground floor, which will not be used in the actual calculation.\r\n # Ground floor is stored here for completeness.\r\n weight_floor_height = floor_weight * floor_height[1:, 0]**k\r\n # Equation 12.8-12 in ASCE 7-10\r\n Cvx = weight_floor_height/np.sum(weight_floor_height)\r\n # Calculate the seismic story force\r\n seismic_force = Cvx * base_shear\r\n # Calculate the shear force for each story: from top story to bottom story\r\n story_shear = np.zeros([len(floor_weight), 1])\r\n for story in range(len(floor_weight)-1, -1, -1):\r\n story_shear[story] = np.sum(seismic_force[story:])\r\n\r\n return seismic_force, story_shear", "def SFR_per_tff(self):\n self.sfr_ff = 0.073 * self.alpha_vir_summed**(-0.68) * np.mean(self.Mach_vec)**(-0.32)\n self.SFR = self.sfr_ff * self.mass_Msun / self.tff_Myr\n\n # Star formation rate as in Joung & Mac Low 2006.\n if (self.mass_Msun / self.M_jeans > 1.0):\n self.SFR_JML = 0.3 * self.mass_Msun / self.tff_Myr\n else:\n self.SFR_JML = 0.0e6", "def SpreadFactor(self): \n return 4.5", "def getStoichiometricCoefficient(self, spec):\n cython.declare(stoich=cython.int, reactant=Species, product=Species)\n stoich = 0\n for reactant in self.reactants:\n if reactant is spec: stoich -= 1\n for product in self.products:\n if product is spec: stoich += 1\n return stoich", "def slip_correction_factor(self):\n return scf(\n radius=self.particle_radius,\n knu=self.knudsen_number(),\n )", "def Strehl(Fin):\n normsq = _np.abs(Fin.field).sum()**2\n if normsq == 0.0:\n raise ValueError('Error in Strehl: Zero beam power')\n strehl = _np.real(Fin.field).sum()**2 + _np.imag(Fin.field).sum()**2\n strehl = strehl/normsq\n return strehl", "def sch(self, peg):\n if not np.iterable(self.x):\n xyzP = peg.rotation_matrix.T.dot(\n array([self.x,self.y,self.z])-peg.translation_vector)\n else:\n xyzP = peg.rotation_matrix.T.dot(\n array([self.x-peg.translation_vector[0],\n self.y-peg.translation_vector[1],\n self.z-peg.translation_vector[2]]))\n r = np.linalg.norm(xyzP, axis=0)\n h = r - peg.radius\n c = peg.radius * arcsin(xyzP[2] / r)\n s = peg.radius * arctan2(xyzP[1], xyzP[0])\n return SCH(peg, s, c, h)", "def compute_seismic_force(self):\r\n # Please note that the period for computing the required strength should be bounded by CuTa\r\n period_for_strength = min(self.elf_parameters['modal period'], self.elf_parameters['period'])\r\n # The period used for computing story drift is not required to be bounded by CuTa\r\n if PERIOD_FOR_DRIFT_LIMIT:\r\n period_for_drift = min(self.elf_parameters['modal period'], self.elf_parameters['period'])\r\n else:\r\n period_for_drift = self.elf_parameters['modal period']\r\n # Call function defined in \"help_functions.py\" to determine the seismic response coefficient\r\n Cs_for_strength = calculate_Cs_coefficient(self.elf_parameters['SDS'], self.elf_parameters['SD1'],\r\n self.elf_parameters['S1'], period_for_strength,\r\n self.elf_parameters['TL'], self.elf_parameters['R'],\r\n self.elf_parameters['Ie'])\r\n Cs_for_drift = calculate_Cs_coefficient(self.elf_parameters['SDS'], self.elf_parameters['SD1'],\r\n self.elf_parameters['S1'], 
period_for_drift,\r\n self.elf_parameters['TL'], self.elf_parameters['R'],\r\n self.elf_parameters['Ie'])\r\n # Calculate the base shear\r\n base_shear_for_strength = Cs_for_strength * np.sum(self.gravity_loads['floor weight'])\r\n base_shear_for_drift = Cs_for_drift * np.sum(self.gravity_loads['floor weight'])\r\n # Call function defined in \"help_functions.py\" to compute k coefficient\r\n k = determine_k_coeficient(self.elf_parameters['period'])\r\n # Call function defined in \"help_functions.py\" to determine the lateral force for each floor level\r\n lateral_story_force_for_strength, story_shear_for_strength \\\r\n = calculate_seismic_force(base_shear_for_strength, self.gravity_loads['floor weight'], \\\r\n self.geometry['floor height'], k)\r\n lateral_story_force_for_drift, story_shear_for_drift \\\r\n = calculate_seismic_force(base_shear_for_drift, self.gravity_loads['floor weight'], \\\r\n self.geometry['floor height'], k)\r\n # Store information into class attribute\r\n self.seismic_force_for_strength = {'lateral story force': lateral_story_force_for_strength, \\\r\n 'story shear': story_shear_for_strength, \\\r\n 'base shear': base_shear_for_strength, 'Cs': Cs_for_strength}\r\n self.seismic_force_for_drift = {'lateral story force': lateral_story_force_for_drift, \\\r\n 'story shear': story_shear_for_drift, \\\r\n 'base shear': base_shear_for_drift, 'Cs': Cs_for_drift}", "def calc_ST(Sal):\n return 0.14 * Sal / 1.80655 / 96.062 # mol/kg-SW", "def fermion_massSq(self, X):\n X = np.array(X)\n h, sigma = X[...,0], X[...,1]\n\n #Top quark\n mt_Sq_field = self.yt**2*h*h/2\n mt_Sq_vev = self.yt**2*self.v0**2/2 + 0*h\n \n MSq = np.array([mt_Sq_field])\n MSq_vev = np.array([mt_Sq_vev])\n MSq = np.rollaxis(MSq, 0, len(MSq.shape))\n MSq_vev = np.rollaxis(MSq_vev, 0, len(MSq_vev.shape))\n dof = np.array([12])\n return MSq, MSq_vev, dof", "def _get_shrink_target_single_factor(self, X: np.ndarray, S: np.ndarray) -> np.ndarray:\n X_mkt = np.nanmean(X, axis=1)\n cov_mkt = np.asarray(X.T.dot(X_mkt) / len(X))\n var_mkt = np.asarray(X_mkt.dot(X_mkt) / len(X))\n F = np.outer(cov_mkt, cov_mkt) / var_mkt\n np.fill_diagonal(F, np.diag(S))\n return F", "def sas(mol: SmallMolecule) -> float:\n return _sas(mol)", "def _calculate_max_sharpe_weights(self):\r\n\r\n weights = inv(self.posterior_covariance).dot(self.posterior_expected_returns.T)\r\n weights /= sum(weights)\r\n return weights", "def hc_Sf(f, Sf, d=1):\n\n f = f.to(u.Hz)\n Sf = Sf.to(1/u.Hz**3)\n hc = np.sqrt(12*np.pi**2)*np.sqrt(Sf*f**3)\n if d==-1: hc = Sf**2/12/np.pi**2/f**(3/2)\n\n return hc", "def sinh(x):\n\ttry:\n\t\tval = np.sinh(x.val)\n\t\tders = defaultdict(float)\n\t\tsec_ders = defaultdict(float)\n\t\tfor key in x.der:\n\t\t\tders[key] += np.cosh(x.val) * (x.der[key])\n\t\t\tsec_ders[key] += x.sec_der[key]*np.cosh(x.val) + x.der[key]**2*np.sinh(x.val)\n\t\treturn Variable(val, ders, sec_ders)\n\texcept AttributeError:\n\t\treturn np.sinh(x)", "def sch(self, peg):\n return self.xyz(peg.ellipsoid).sch(peg)", "def gramschmidt2ssp(xgs):\n\tU = LieElement(np.pi/4) # Gram-Schmidt basis similarity matrix\n\tV = np.array([[1, 0, 0, 0],\n\t\t\t\t [0, 0, 1, 0],\n\t\t\t [0, 0, 0, 1]],float ) # 4->3 dim reduction matrix\n\t\n\t\n\tx = np.dot(U.transpose(),np.dot(V.transpose(),xgs))\n\treturn x", "def calc_FT(Sal):\n return 6.7e-5 * Sal / 1.80655 / 18.9984 # mol/kg-SW" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the VTK mesh of this grain.
def SetVtkMesh(self, mesh):
    self.vtkmesh = mesh
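A quick usage note for the setter above: any VTK data set (for instance the vtkPolyData produced by a VTK source or filter) can be attached. Since the Grain constructor is not shown in this snippet, the example below uses a small hypothetical stand-in class that only reproduces the setter, plus a standard vtkSphereSource as a dummy mesh.

import vtk

class FakeGrain:
    # hypothetical stand-in: only mimics the SetVtkMesh setter shown above
    def SetVtkMesh(self, mesh):
        self.vtkmesh = mesh

sphere = vtk.vtkSphereSource()
sphere.SetRadius(5.0)
sphere.Update()

grain = FakeGrain()
grain.SetVtkMesh(sphere.GetOutput())  # attach the generated vtkPolyData
print(grain.vtkmesh.GetNumberOfPoints())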
[ "def part_mesh(self, part_mesh):\n\n part_mesh = np.array(part_mesh)\n\n if part_mesh.shape[0] != self._num_parts:\n raise ValueError(\"Size of part mesh invalid!\")\n\n self._part_mesh = part_mesh", "def set_mesh(self, mesh_dim, typ_elem, ndp, nptfr, nptir, nelem, npoin,\n ikles, ipobo, knolg, coordx, coordy, nplan, date,\n time, coordz=None):\n if coordz is None:\n tmp_z = np.zeros((npoin))\n else:\n tmp_z = coordz\n\n self.typ_elem = typ_elem\n\n tmp_ikle = ikles.T.reshape((nelem*ndp)) + 1\n\n self.logger.debug(\"Writing mesh information\")\n self.error = HermesFile._hermes.set_mesh(\\\n self.fformat, self.my_id,\n mesh_dim, typ_elem, ndp, nptfr,\n nptir, nelem, tmp_ikle,\n ipobo, knolg, coordx, coordy,\n nplan, date, time, npoin, tmp_z)\n del tmp_ikle\n if coordz is None:\n del tmp_z", "def UpdateMesh(self):\r\n pass", "def _generate_mesh(self):\n self._mesh_points = self._make_pos()", "def setMesh(self, mesh, **kwargs): # secondaryNodes=3):\n super().setMesh(mesh, **kwargs) # ignoreRegionManager=True)\n print(self.mesh(), self.mesh().secondaryNodeCount())\n self.mids = pg.IVector()\n self.nnodes = self.mesh().nodeCount()\n for c in self.mesh().cells():\n n = self.mesh().createSecondaryNode(c.center())\n c.addSecondaryNode(n)\n self.mids.push_back(n.id())\n\n print(self.mesh())", "def set_mesh(self,\n OPTREADINPUT=10,\n OPTINITMESHDENS=0,\n X_GRID_SIZE=10000.,\n Y_GRID_SIZE=10000.,\n OPT_PT_PLACE=1,\n GRID_SPACING=200.,\n NUM_PTS='n/a',\n INPUTDATAFILE='n/a',\n INPUTTIME='n/a',\n OPTREADLAYER=0,\n POINTFILENAME='n/a',\n ARCGRIDFILENAME='n/a',\n TILE_INPUT_PATH='n/a',\n OPT_TILES_OR_SINGLE_FILE=0,\n LOWER_LEFT_EASTING='n/a',\n LOWER_LEFT_NORTHING='n/a',\n NUM_TILES_EAST='n/a',\n NUM_TILES_NORTH='n/a',\n OPTMESHADAPTDZ=0,\n MESHADAPT_MAXNODEFLUX='n/a',\n OPTMESHADAPTAREA=0,\n MESHADAPTAREA_MINAREA='n/a',\n MESHADAPTAREA_MAXVAREA='n/a'):\n self.parameters['OPTREADINPUT'] = OPTREADINPUT\n self.parameters['OPTINITMESHDENS'] = OPTINITMESHDENS\n self.parameters['X_GRID_SIZE'] = X_GRID_SIZE\n self.parameters['Y_GRID_SIZE'] = Y_GRID_SIZE\n self.parameters['OPT_PT_PLACE'] = OPT_PT_PLACE\n self.parameters['GRID_SPACING'] = GRID_SPACING\n self.parameters['NUM_PTS'] = NUM_PTS\n self.parameters['INPUTDATAFILE'] = INPUTDATAFILE\n self.parameters['INPUTTIME'] = INPUTTIME\n self.parameters['OPTREADLAYER'] = OPTREADLAYER\n self.parameters['POINTFILENAME'] = POINTFILENAME\n self.parameters['ARCGRIDFILENAME'] = ARCGRIDFILENAME\n self.parameters['TILE_INPUT_PATH'] = TILE_INPUT_PATH\n self.parameters['OPT_TILES_OR_SINGLE_FILE'] = OPT_TILES_OR_SINGLE_FILE\n self.parameters['LOWER_LEFT_EASTING'] = LOWER_LEFT_EASTING\n self.parameters['LOWER_LEFT_NORTHING'] = LOWER_LEFT_NORTHING\n self.parameters['NUM_TILES_EAST'] = NUM_TILES_EAST\n self.parameters['NUM_TILES_NORTH'] = NUM_TILES_NORTH\n self.parameters['OPTMESHADAPTDZ'] = OPTMESHADAPTDZ\n self.parameters['MESHADAPT_MAXNODEFLUX'] = MESHADAPT_MAXNODEFLUX\n self.parameters['OPTMESHADAPTAREA'] = OPTMESHADAPTAREA\n self.parameters['MESHADAPTAREA_MINAREA'] = MESHADAPTAREA_MINAREA\n self.parameters['MESHADAPTAREA_MAXVAREA'] = MESHADAPTAREA_MAXVAREA", "def setFaces(self, faces, deep=0):\n self._faces = np.c_[np.tile(faces.shape[1], faces.shape[0]),\n faces].flatten().astype(np.int64)\n self._f = numpy_support.numpy_to_vtkIdTypeArray(self._faces, deep=deep)\n self.polys.SetCells(len(faces), self._f)\n self.mesh.SetPolys(self.polys)", "def set_mesh_geometry(self,buffer,rotation):\n length = np.max(self.maximum-self.origin)\n # origin = 
self.builders[0].interpolator.support.origin\n # maximum = self.builders[0].interpolator.support.maximum#set_interpolation_geometry\n # if origin[2]>self.origin[2]:\n # origin[2]=self.origin[2]\n # if maximum[2]<self.maximum[2]:\n # maximum[2]=self.maximum[2]\n # self.builders[0].set_interpolation_geometry(origin,maximum)\n # for builder in self.builders:\n # all three coordinates share the same support\n self.builders[0].set_interpolation_geometry(self.origin-length*buffer,self.maximum+length*buffer,rotation)", "def set_inputmesh(self, meshname=None, meshfilename=None,\n fields_to_transfer=None):\n self.fields_to_transfer = []\n if meshname is not None:\n self.data_inputmesh = meshname\n if meshfilename is not None:\n p = Path(meshfilename).absolute()\n self.input_meshfile = p.parent / f'{p.stem}.geof'\n self.set_script_args(input_meshfile=str(self.input_meshfile))\n if fields_to_transfer is not None:\n self.fields_to_transfer = fields_to_transfer\n return", "def on_set_mesh_text( self, event ):\r\n\r\n\t\tif self.selected_mesh > -1:\r\n\t\t\ttext_value = self.cmesh_text.GetValue( )\r\n\t\t\tif not text_value.startswith( '*.' ):\r\n\t\t\t\tif self.bone_weights[ self.selected_mesh ]:\r\n\t\t\t\t\tself.cmeshx_files[ self.selected_mesh ] = text_value\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.smeshx_files[ self.selected_mesh ] = text_value", "def load_pytorch_mesh_from_file(self):\n self.pytorch_mesh = load_objs_as_meshes([self.mesh_path]).cuda()", "def SetData(self, vtkDataArray, vtkDataArray_1):\n ...", "def set_mesh_vertex(self, pos: tuple[int, int], point: UVec, dxfattribs=None):\n dxfattribs = dict(dxfattribs or {})\n dxfattribs[\"location\"] = point\n vertex = self.get_mesh_vertex(pos)\n vertex.update_dxf_attribs(dxfattribs)", "def calculate_change_mesh(self):", "def set_MeshPoints(self,startPoint,endPoint,NoNodes):\n a,b,N = (startPoint,endPoint,NoNodes)\n self.N = N\n self.h = (b-a)/N\n self.a = a\n self.b = b", "def setAsBackgroundMesh(tag):\n ierr = c_int()\n lib.gmshModelMeshFieldSetAsBackgroundMesh(\n c_int(tag),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshFieldSetAsBackgroundMesh returned non-zero error code: \",\n ierr.value)", "def set_outputmesh(self, meshname=None, meshfilename=None):\n if meshname is not None:\n self.data_outputmesh = meshname\n if meshfilename is not None:\n p = Path(meshfilename).absolute()\n self.output_meshfile = p.parent / f'{p.stem}.geof'\n self.set_script_args(output_meshfile=str(self.output_meshfile))\n return", "def load_trimesh_from_file(self):\n self.mesh = trimesh.load(self.mesh_path,process=False)", "def on_select_mesh( self, event ):\r\n\r\n\t\t# Get the selection name\r\n\t\tselection = self.mesh_list.GetSelection()\r\n\r\n\t\tif selection > -1:\r\n\t\t\tselected_string = None\r\n\t\t\ttry:\r\n\t\t\t\tselected_string = self.mesh_list.GetString( selection )\r\n\t\t\texcept AssertionError:\r\n\t\t\t\tpass\r\n\t\t\texcept IndexError:\r\n\t\t\t\tpass\r\n\r\n\t\t\tif selected_string:\r\n\t\t\t\tselected_string = str( selected_string.strip( ) )\r\n\t\t\t\tself.selected_mesh = selection\r\n\t\t\t\tself.mesh_selection = selection\r\n\r\n\t\tself.update_ui( )", "def setUp(self):\n\n data = np.ones((5, 5), dtype=np.float32)\n data[2, 2] = 0\n self.cube = set_up_variable_cube(data, spatial_grid=\"equalarea\",)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a mesh to this grain. This method processes a labeled array to extract the geometry of the grain. The grain shape is defined by the pixels whose value equals the grain id. A vtkUniformGrid object is created and then thresholded or contoured depending on the value of the `contour` flag. The resulting mesh is attached to the grain via `SetVtkMesh`, centered on the center of mass of the grain.
def add_vtk_mesh(self, array, contour=True, verbose=False):
    label = self.id  # we use the grain id here...
    # create vtk structure
    from scipy import ndimage
    from vtk.util import numpy_support
    grain_size = np.shape(array)
    array_bin = (array == label).astype(np.uint8)
    local_com = ndimage.measurements.center_of_mass(array_bin, array)
    vtk_data_array = numpy_support.numpy_to_vtk(np.ravel(array_bin, order='F'), deep=1)
    grid = vtk.vtkUniformGrid()
    grid.SetOrigin(-local_com[0], -local_com[1], -local_com[2])
    grid.SetSpacing(1, 1, 1)
    if vtk.vtkVersion().GetVTKMajorVersion() > 5:
        grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR, vtk.vtkInformation())
    else:
        grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR)
    if contour:
        grid.SetExtent(0, grain_size[0] - 1, 0, grain_size[1] - 1, 0, grain_size[2] - 1)
        grid.GetPointData().SetScalars(vtk_data_array)
        # contouring selected grain
        contour = vtk.vtkContourFilter()
        if vtk.vtkVersion().GetVTKMajorVersion() > 5:
            contour.SetInputData(grid)
        else:
            contour.SetInput(grid)
        contour.SetValue(0, 0.5)
        contour.Update()
        if verbose:
            print(contour.GetOutput())
        self.SetVtkMesh(contour.GetOutput())
    else:
        grid.SetExtent(0, grain_size[0], 0, grain_size[1], 0, grain_size[2])
        grid.GetCellData().SetScalars(vtk_data_array)
        # threshold selected grain
        thresh = vtk.vtkThreshold()
        thresh.ThresholdBetween(0.5, 1.5)
        # thresh.ThresholdBetween(label-0.5, label+0.5)
        if vtk.vtkVersion().GetVTKMajorVersion() > 5:
            thresh.SetInputData(grid)
        else:
            thresh.SetInput(grid)
        thresh.Update()
        if verbose:
            print('thresholding label: %d' % label)
            print(thresh.GetOutput())
        self.SetVtkMesh(thresh.GetOutput())
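To see the contouring branch of the method above in isolation, here is a minimal, self-contained sketch that extracts the 0.5 isosurface of a single labeled grain from a small synthetic numpy array. It follows the same idea but, as a simplifying assumption, uses a plain vtkImageData (the parent class of vtkUniformGrid) with point data, so the version-dependent SetScalarType calls are not needed; it is an illustration, not a drop-in replacement for the method above.

import numpy as np
import vtk
from vtk.util import numpy_support
from scipy import ndimage

# synthetic labeled image: a 20x20x20 volume where grain id 3 fills a centered ball
label = 3
array = np.zeros((20, 20, 20), dtype=np.uint8)
x, y, z = np.ogrid[:20, :20, :20]
array[(x - 10) ** 2 + (y - 10) ** 2 + (z - 10) ** 2 < 36] = label

array_bin = (array == label).astype(np.uint8)
com = ndimage.center_of_mass(array_bin)

image = vtk.vtkImageData()
image.SetDimensions(*array.shape)
image.SetSpacing(1, 1, 1)
image.SetOrigin(-com[0], -com[1], -com[2])  # center the mesh on the grain centroid
image.GetPointData().SetScalars(
    numpy_support.numpy_to_vtk(np.ravel(array_bin, order='F'), deep=1))

contour = vtk.vtkContourFilter()
contour.SetInputData(image)
contour.SetValue(0, 0.5)  # isosurface between background (0) and grain (1)
contour.Update()
print(contour.GetOutput().GetNumberOfPoints())  # number of vertices of the grain surface mesh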
[ "def add_mesh(cube, url):\n from pyugrid import UGrid\n ug = UGrid.from_ncfile(url)\n cube.mesh = ug\n cube.mesh_dimension = 1\n return cube", "def add_mesh(self, **args):\n if \"filename\" not in args:\n raise KeyError(\"No filename given\")\n self.meshfiles.append((args[\"filename\"], self._check_ax_sym(args)))", "def add_polyhedron(self, mesh: MeshBuilder) -> None:\n self.add(meshex.scad_dumps(mesh))", "def _update_mesh(self, i):\n\n # Determine which lattice cells the sphere is in and remove the\n # sphere id from those cells\n for idx in self.mesh_map[i]:\n self.mesh[idx].remove(i)\n del self.mesh_map[i]\n\n # Determine which lattice cells are within one diameter of sphere's\n # center and add this sphere to the list of spheres in those cells\n for idx in self._cell_list_cube(self.spheres[i], self.diameter):\n self.mesh[idx].add(i)\n self.mesh_map[i].add(idx)", "def add(fieldType, tag=-1):\n ierr = c_int()\n api__result__ = lib.gmshModelMeshFieldAdd(\n c_char_p(fieldType.encode()),\n c_int(tag),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshFieldAdd returned non-zero error code: \",\n ierr.value)\n return api__result__", "def build_mesh(directory, mesh_divisions):\n\n # Get bounds of mesh.\n maxy, maxx, miny, minx = get_bounds(directory)\n\n # X and Y divisions counts.\n nx = mesh_divisions\n ny = mesh_divisions\n\n # X and Y divisions size.\n dx = abs(maxx - minx) / nx\n dy = abs(maxy - miny) / ny\n\n # Init mesh list and id counter.\n crs = {'init': 'epsg:4326'}\n mesh = gpd.GeoDataFrame(crs=crs)\n r_id = 0\n\n # For every \"row\" (lattitude) division:\n for i in range(ny):\n\n # For every \"column\" (longitude) division:\n for j in range(nx):\n\n # Init poly coors.\n vertices = []\n\n # Southwest corner coordinate:\n vertices.append([min(minx+dx*j,maxx),max(maxy-dy*i,miny)])\n\n # Southeast corner coordinate:\n vertices.append([min(minx+dx*(j+1),maxx),max(maxy-dy*i,miny)])\n\n # Northeast corner coordinate:\n vertices.append([min(minx+dx*(j+1),maxx),max(maxy-dy*(i+1),miny)])\n\n # Northwest corner coordinate:\n vertices.append([min(minx+dx*j,maxx),max(maxy-dy*(i+1),miny)])\n\n # Close loop, Southwest corner coordinate:\n vertices.append([min(minx+dx*j,maxx),max(maxy-dy*i,miny)])\n\n # Turn into a shapely Polygon\n r_poly = Polygon(vertices)\n\n # Init GeoSeries with Polygon\n r_series = gpd.GeoSeries(r_poly)\n r_series.name = r_id\n\n # Append Series to Mesh GeoDataFrame\n mesh = mesh.append(r_series)\n\n # Increase id.\n r_id += 1\n\n # Set gemotry.\n mesh = mesh.rename(columns={0: 'geometry'}).set_geometry('geometry')\n\n # Rotate the mesh.\n pass\n\n # Return the GeoDataFrame\n return mesh", "def from_meshio(mesh):\n from meshio.vtk._vtk import (\n meshio_to_vtk_type,\n vtk_type_to_numnodes,\n )\n\n # Extract cells from meshio.Mesh object\n offset = []\n cells = []\n cell_type = []\n next_offset = 0\n for c in mesh.cells:\n vtk_type = meshio_to_vtk_type[c.type]\n numnodes = vtk_type_to_numnodes[vtk_type]\n offset += [next_offset + i * (numnodes + 1) for i in range(len(c.data))]\n cells.append(\n np.hstack((np.full((len(c.data), 1), numnodes), c.data)).ravel()\n )\n cell_type += [vtk_type] * len(c.data)\n next_offset = offset[-1] + numnodes + 1\n\n # Extract cell data from meshio.Mesh object\n cell_data = {k: np.concatenate(v) for k, v in mesh.cell_data.items()}\n\n # Create pyvista.UnstructuredGrid object\n points = mesh.points\n if points.shape[1] == 2:\n points = np.hstack((points, np.zeros((len(points), 1))))\n\n grid = 
pyvista.UnstructuredGrid(\n np.array(offset),\n np.concatenate(cells),\n np.array(cell_type),\n np.array(points, np.float64),\n )\n\n # Set point data\n grid.point_arrays.update({k: np.array(v, np.float64) for k, v in mesh.point_data.items()})\n\n # Set cell data\n grid.cell_arrays.update(cell_data)\n\n return grid", "def create_mesh_data(self):\n\n # if len(self.physical_surfaces) > 1:\n # self.geom.boolean_union(self.physical_surfaces)\n\n self.__physical_surfaces__()\n\n directory = os.getcwd() + '/debug/gmsh/'\n\n mesh_file = '{}{}.msh'.format(directory, self.filename)\n geo_file = '{}{}.geo'.format(directory, self.filename)\n vtk_file = '{}{}.vtu'.format(directory, self.filename)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n mesh_data = pygmsh.generate_mesh(\n self.geom, verbose=False, dim=2,\n prune_vertices=False,\n remove_faces=False,\n geo_filename=geo_file\n )\n\n # meshio.write(mesh_file, mesh_data)\n # meshio.write(vtk_file, mesh_data)\n\n return mesh_data", "def unstructured_mesh(fname, sizing, convert):\n geo_tools.prep_mesh_config(\n fname + \"Morphology.geo\", fname + \"UMesh.geo\", sizing)\n mesh_domain(fname + \"UMesh.geo\")\n if convert:\n convert_mesh(fname + \"UMesh.msh\", fname + \"UMesh.xml\")", "def add_mesh_parameters(sumo_file_path, refine_level=0.0):\n\n refine_ratio = 0.6 # to get approx. double mesh cell when +1 on \"refine_level\"\n refine_factor = refine_ratio**refine_level\n log.info('Refinement factor is {}'.format(refine_factor))\n\n # Open SUMO (.smx) with tixi library\n sumo = cpsf.open_tixi(sumo_file_path)\n ROOT_XPATH = '/Assembly'\n\n # Get all Body (fuselage) and apply mesh parameters\n if sumo.checkElement(ROOT_XPATH):\n body_cnt = sumo.getNamedChildrenCount(ROOT_XPATH, 'BodySkeleton')\n log.info(str(body_cnt) + ' body has been found.')\n else:\n body_cnt = 0\n log.warning('No Fuselage has been found in this SUMO file!')\n\n for i_body in range(body_cnt):\n body_xpath = ROOT_XPATH + '/BodySkeleton[' + str(i_body+1) + ']'\n\n circ_list = []\n min_radius = 10e6\n\n # Go throught every Boby frame (fuselage sections)\n frame_cnt = sumo.getNamedChildrenCount(body_xpath, 'BodyFrame')\n for i_sec in range(frame_cnt):\n frame_xpath = body_xpath + '/BodyFrame[' + str(i_sec+1) + ']'\n\n # Estimate circumference and add to the list\n height = sumo.getDoubleAttribute(frame_xpath,'height')\n width = sumo.getDoubleAttribute(frame_xpath,'width')\n circ = 2 * math.pi * math.sqrt((height**2 + width**2) / 2)\n circ_list.append(circ)\n\n # Get overall min radius (semi-minor axi for elipse)\n min_radius = min(min_radius,height,width)\n\n mean_circ = sum(circ_list) / len(circ_list)\n\n # Calculate mesh parameters from inputs and geometry\n maxlen = (0.08 * mean_circ) * refine_factor\n minlen = min(0.1* maxlen, min_radius/4) * refine_factor # in SUMO, it is min_radius/2, but sometimes it leads to meshing errors\n\n # Add mesh parameters in the XML file (.smx)\n meshcrit_xpath = body_xpath + '/MeshCriterion'\n if not sumo.checkElement(meshcrit_xpath):\n sumo.addTextElement(body_xpath, 'MeshCriterion','')\n\n sumo.addTextAttribute(meshcrit_xpath, 'defaults', 'false')\n sumo.addTextAttribute(meshcrit_xpath, 'maxlen', str(maxlen))\n sumo.addTextAttribute(meshcrit_xpath, 'minlen', str(minlen))\n sumo.addTextAttribute(meshcrit_xpath, 'maxphi', '30')\n sumo.addTextAttribute(meshcrit_xpath, 'maxstretch', '6')\n sumo.addTextAttribute(meshcrit_xpath, 'nvmax', '1073741824')\n sumo.addTextAttribute(meshcrit_xpath, 'xcoarse', 'false')\n\n\n # Chage fusage caps\n 
cap_cnt = sumo.getNamedChildrenCount(body_xpath, 'Cap')\n\n for i_cap in range(cap_cnt):\n cap_xpath = body_xpath + '/Cap[1]'\n sumo.removeElement(cap_xpath)\n\n\n sumo.addTextElementAtIndex(body_xpath,'Cap','',1)\n cap1_xpath = body_xpath + '/Cap[1]'\n sumo.addTextAttribute(cap1_xpath, 'height', '0')\n sumo.addTextAttribute(cap1_xpath, 'shape', 'LongCap')\n sumo.addTextAttribute(cap1_xpath, 'side', 'south')\n\n cap2_xpath = body_xpath + '/Cap[2]'\n sumo.addTextElementAtIndex(body_xpath,'Cap','',2)\n sumo.addTextAttribute(cap2_xpath, 'height', '0')\n sumo.addTextAttribute(cap2_xpath, 'shape', 'LongCap')\n sumo.addTextAttribute(cap2_xpath, 'side', 'north')\n\n\n # Go through every Wing and apply mesh parameters\n if sumo.checkElement(ROOT_XPATH):\n wing_cnt = sumo.getNamedChildrenCount(ROOT_XPATH, 'WingSkeleton')\n log.info(str(wing_cnt) + ' wing(s) has been found.')\n else:\n wing_cnt = 0\n log.warning('No wing has been found in this CPACS file!')\n\n for i_wing in range(wing_cnt):\n wing_xpath = ROOT_XPATH + '/WingSkeleton[' + str(i_wing+1) + ']'\n\n chord_list = []\n\n # Go throught every WingSection\n section_cnt = sumo.getNamedChildrenCount(wing_xpath, 'WingSection')\n for i_sec in range(section_cnt):\n section_xpath = wing_xpath + '/WingSection[' + str(i_sec+1) + ']'\n\n chord_length = sumo.getDoubleAttribute(section_xpath,'chord')\n chord_list.append(chord_length)\n\n # In SUMO refChord is calculated from Area and Span, but this is not\n # trivial to get those value for each wing from the .smx file\n ref_chord = sum(chord_list) / len(chord_list)\n\n # Calculate mesh parameter from inputs and geometry\n maxlen = (0.15 * ref_chord) * refine_factor\n minlen = (0.08* maxlen) * refine_factor # in sumo it is 0.08*maxlen or 0.7*min leading edge radius...?\n\n if refine_level > 1:\n lerfactor = 1 / (2.0 + 0.5 * (refine_level-1))\n terfactor = 1 / (2.0 + 0.5 * (refine_level-1))\n else:\n # correspond to the default value in SUMO\n lerfactor = 1 / 2.0\n terfactor = 1 / 2.0\n\n # Add mesh parameters in the XML file (.smx)\n meshcrit_xpath = wing_xpath + '/WingCriterion'\n if not sumo.checkElement(meshcrit_xpath):\n sumo.addTextElement(wing_xpath, 'WingCriterion','')\n\n sumo.addTextAttribute(meshcrit_xpath, 'defaults', 'false')\n sumo.addTextAttribute(meshcrit_xpath, 'maxlen', str(maxlen))\n sumo.addTextAttribute(meshcrit_xpath, 'minlen', str(minlen))\n sumo.addTextAttribute(meshcrit_xpath, 'lerfactor', str(lerfactor))\n sumo.addTextAttribute(meshcrit_xpath, 'terfactor', str(terfactor))\n sumo.addTextAttribute(meshcrit_xpath, 'maxphi', '30')\n sumo.addTextAttribute(meshcrit_xpath, 'maxstretch', '6')\n sumo.addTextAttribute(meshcrit_xpath, 'nvmax', '1073741824')\n sumo.addTextAttribute(meshcrit_xpath, 'xcoarse', 'false')\n\n cpsf.close_tixi(sumo, sumo_file_path)", "def _generate_mesh(self):\n self._mesh_points = self._make_pos()", "def make_mesh_triangle_meshpy(self, **params):\n c = params['c']\n mesh_info = MeshInfo()\n \n # generate vertices and facets\n if params['obj'] == 'line':\n points, facets, faces = make_vertex_facets_line(params)\n elif params['obj'] == 'hexagon':\n points, facets, faces = make_vertex_facets_hexagon(params)\n elif params['obj'] == 'rect':\n points, facets = make_vertex_facets_rect(params)\n \n print('points = {0}\\nfacets = {1}'.format(pformat(points), pformat(facets)))\n # print('mesh_info.unit = {0}'.format(mesh_info.unit))\n \n # copy points data into mesh\n mesh_info.set_points(points)\n\n # copy facets data into mesh\n mesh_info.set_facets(facets)\n \n # build 
the mesh\n mesh = build(mesh_info)\n\n # writing objects\n # mesh.write_vtk(\"trigrid.vtk\")\n # f = open('trigrid.pkl', 'wb')\n # pickle.dump(mesh, f)\n # f.close()\n # joblib.dump(mesh, 'trigrid.pkl')\n # sys.exit()\n return mesh", "def reconstruct_mesh(self):\n\n # NOTE: Before drawing the skeleton, create the materials once and for all to improve the\n # performance since this is way better than creating a new material per section or segment\n nmv.builders.create_skeleton_materials(builder=self)\n\n # Verify and repair the morphology, if required\n result, stats = nmv.utilities.profile_function(self.verify_morphology_skeleton)\n self.profiling_statistics += stats\n\n # Apply skeleton - based operation, if required, to slightly modify the skeleton\n result, stats = nmv.utilities.profile_function(\n nmv.builders.modify_morphology_skeleton, self)\n self.profiling_statistics += stats\n\n # Build the soma, with the default parameters\n result, stats = nmv.utilities.profile_function(nmv.builders.reconstruct_soma_mesh, self)\n self.profiling_statistics += stats\n\n # Build the arbors and connect them to the soma\n if self.options.mesh.soma_connection == nmv.enums.Meshing.SomaConnection.CONNECTED:\n\n # Build the arbors\n result, stats = nmv.utilities.profile_function(self.build_arbors, True)\n self.profiling_statistics += stats\n\n # Connect to the soma\n result, stats = nmv.utilities.profile_function(\n nmv.builders.connect_arbors_to_soma, self)\n self.profiling_statistics += stats\n\n # Build the arbors only without any connection to the soma\n else:\n # Build the arbors\n result, stats = nmv.utilities.profile_function(self.build_arbors, False)\n self.profiling_statistics += stats\n\n # Tessellation\n result, stats = nmv.utilities.profile_function(nmv.builders.decimate_neuron_mesh, self)\n self.profiling_statistics += stats\n\n # Surface roughness\n result, stats = nmv.utilities.profile_function(\n nmv.builders.add_surface_noise_to_arbor, self)\n self.profiling_statistics += stats\n\n # Add the spines\n result, stats = nmv.utilities.profile_function(nmv.builders.add_spines_to_surface, self)\n self.profiling_statistics += stats\n\n # Join all the objects into a single object\n result, stats = nmv.utilities.profile_function(\n nmv.builders.join_mesh_object_into_single_object, self)\n self.profiling_statistics += stats\n\n # Transform to the global coordinates, if required\n result, stats = nmv.utilities.profile_function(\n nmv.builders.transform_to_global_coordinates, self)\n self.profiling_statistics += stats\n\n # Collect the stats. 
of the mesh\n result, stats = nmv.utilities.profile_function(nmv.builders.collect_mesh_stats, self)\n self.profiling_statistics += stats\n\n # Done\n nmv.logger.header('Mesh Reconstruction Done!')\n nmv.logger.log(self.profiling_statistics)\n\n # Write the stats to file\n nmv.builders.write_statistics_to_file(builder=self, tag='skinning')", "def generate_mesh(self):\n length = self.length\n Nx = self.Nx\n Nz = self.Nz\n self.mesh = RectangleMesh(Point(0,0), Point(length, 1), Nx, Nz, \"left/right\")\n\n # Now deform top and bottom based on surface and base profiles\n coordinates = self.mesh.coordinates()\n surf = self.surf_fun(coordinates[:,0])\n bot = self.bot_fun(coordinates[:,0])\n thick = surf-bot\n coordinates[:,1] = coordinates[:,1]*thick + bot\n self.mesh.bounding_box_tree().build(self.mesh)", "def create_mesh( self, node, scene ):\r\n\r\n\t\t# make sure it isn't a mesh posing as a bone\r\n\t\tnode_is_bone = get_node_properties( node, property_name = 'p_bone_name' )\r\n\t\tif node_is_bone or node.GetName().startswith( 'bone_' ):\r\n\t\t\tself.create_bone( node, scene )\r\n\t\telse:\r\n\t\t\tif not node.GetName().lower().startswith( 'collider_' ):\r\n\t\t\t\tself.meshes.append( node )", "def makeGrids(self):\n # make sure connectivity was created\n self.mesh.init()\n # vertices\n cl = zeros((self.mesh.size(0), 3), dtype='d')\n cl[:, :self.dim] = self.mesh.coordinates()\n # keep reference\n self.refs.append(cl)\n # make vtkarray\n v = vtk.vtkPoints()\n v.SetNumberOfPoints(len(cl))\n v.SetData(VN.numpy_to_vtk(cl))\n # add points to a new grid\n self.vtkgrid = [None] * (self.dim + 1)\n # grids for edges, faces, cells\n for dim in range(1, self.dim + 1):\n self.vtkgrid[dim] = vtk.vtkUnstructuredGrid()\n # grids share points\n self.vtkgrid[dim].SetPoints(v)\n # get connectivity from topology\n nl = array(self.mesh.topology()(dim, 0)()).reshape(-1, dim + 1)\n ncells = len(nl)\n # cellsize = dim + 2\n cells = zeros((ncells, dim + 2), dtype=VN.ID_TYPE_CODE)\n cells[:, 1:] = nl\n cells[:, 0] = dim + 1\n self.refs.append(cells)\n # vtk cell array\n ca = vtk.vtkCellArray()\n ca.SetCells(ncells, VN.numpy_to_vtkIdTypeArray(cells))\n # add edges/faces as VTK cells\n if dim == 1:\n self.vtkgrid[dim].SetCells(vtk.VTK_LINE, ca)\n elif dim == 2:\n self.vtkgrid[dim].SetCells(vtk.VTK_TRIANGLE, ca)\n else:\n self.vtkgrid[dim].SetCells(vtk.VTK_TETRA, ca)\n self.vtkgrid[0] = self.vtkgrid[self.dim]", "def fluidity_to_ugrid_by_mesh(state, test, prefix='', exclude=[]):\n\n dimension=state.vector_fields['Coordinate'].dimension\n\n meshes = [mesh for mesh in state.meshes.values() if test(mesh, dimension)]\n\n if not meshes:\n return None\n\n try:\n coordinates = state.vector_fields['OldCoordinate']\n except:\n coordinates = state.vector_fields['Coordinate']\n\n pts = vtk.vtkPoints()\n pts.Allocate(meshes[0].node_count)\n pts.SetNumberOfPoints(meshes[0].node_count)\n \n ugrid = vtk.vtkUnstructuredGrid()\n\n for k in range(meshes[0].element_count):\n id_list = vtk.vtkIdList()\n id_list.Allocate(meshes[0].ele_loc(k))\n id_list.SetNumberOfIds(meshes[0].ele_loc(k))\n shape = meshes[0].shape\n\n lpoint = numpy.zeros(3)\n\n for loc, (point, node) in enumerate(zip(coordinates.remap_ele(k, meshes[0]),\n meshes[0].ele_nodes(k))):\n\n lpoint[:len(point)] = point \n pts.SetPoint(node, *lpoint)\n id_list.SetId(NUM_DICT[(shape.type, shape.dimension, shape.loc)][loc], node)\n\n ugrid.InsertNextCell(CELL_DICT[(shape.type, shape.dimension, shape.loc)], id_list)\n\n ugrid.SetPoints(pts)\n ugrid = fluidity_data_to_ugrid(state, 
meshes, ugrid, prefix, exclude)\n\n return ugrid", "def setMesh(self, mesh, **kwargs): # secondaryNodes=3):\n super().setMesh(mesh, **kwargs) # ignoreRegionManager=True)\n print(self.mesh(), self.mesh().secondaryNodeCount())\n self.mids = pg.IVector()\n self.nnodes = self.mesh().nodeCount()\n for c in self.mesh().cells():\n n = self.mesh().createSecondaryNode(c.center())\n c.addSecondaryNode(n)\n self.mids.push_back(n.id())\n\n print(self.mesh())", "def __meshing__(self, unit_list):\n self.mesh_grid = []\n for key, zone in unit_list.items():\n counter = 0\n while counter < zone.L:\n self.mesh_grid.append(key)\n counter += 1", "def setAsBackgroundMesh(tag):\n ierr = c_int()\n lib.gmshModelMeshFieldSetAsBackgroundMesh(\n c_int(tag),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshFieldSetAsBackgroundMesh returned non-zero error code: \",\n ierr.value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an XML representation of the Grain instance.
def to_xml(self, doc, file_name=None): grain = doc.createElement('Grain') grain_id = doc.createElement('Id') grain_id_text = doc.createTextNode('%s' % self.id) grain_id.appendChild(grain_id_text) grain.appendChild(grain_id) grain.appendChild(self.orientation.to_xml(doc)) grain_position = doc.createElement('Position') grain_position_x = doc.createElement('X') grain_position.appendChild(grain_position_x) grain_position_x_text = doc.createTextNode('%f' % self.center[0]) grain_position_x.appendChild(grain_position_x_text) grain_position_y = doc.createElement('Y') grain_position.appendChild(grain_position_y) grain_position_y_text = doc.createTextNode('%f' % self.center[1]) grain_position_y.appendChild(grain_position_y_text) grain_position_z = doc.createElement('Z') grain_position.appendChild(grain_position_z) grain_position_z_text = doc.createTextNode('%f' % self.center[2]) grain_position_z.appendChild(grain_position_z_text) grain.appendChild(grain_position) grain_mesh = doc.createElement('Mesh') if not file_name: file_name = self.vtk_file_name() grain_mesh_text = doc.createTextNode('%s' % file_name) grain_mesh.appendChild(grain_mesh_text) grain.appendChild(grain_mesh) return grain
[ "def xml(self):\n return oxml_tostring(self, encoding='UTF-8', standalone=True)", "def generate_xml(self):\n raise NotImplementedError()", "def get_raw_xml_output(self):\n\n return self.xml", "def generate_xml(self):\n assert self.xml_root != None, 'The self.xml_root variable must be set in your inheriting class'\n output = StringIO.StringIO()\n xd = XMLDumper(output, XML_DUMP_PRETTY | XML_STRICT_HDR)\n xd.XMLDumpKeyValue(self.xml_root, self.data.to_dict())\n output.seek(0)\n return output", "def xml(self) -> ET.Element:\n return self.device_info.xml", "def create_xml(self):\n if self.root is not None:\n root = self.root\n signs = root.find('signs')\n if signs is not None: root.remove(signs)\n else:\n root = ET.Element(\"mcbuilder\")\n\n signs = self.signs.create_xml()\n root.insert(0, signs)\n\n return XMLWriter(root).to_string()", "def get_xml(self):\n return etree.tostring(self.xml_tree, pretty_print=True, encoding=\"utf-8\").decode(\"utf-8\")", "def __repr__(self):\n return f\"<Grainbin: {self.id}>\"", "def xml(self):\n return self._domain.xml", "def dump(self):\n return etree.tostring(self.root)", "def print_xml(self):\r\n\t\tfrom lxml import etree\r\n\t\tfrom ..nrml.common import create_nrml_root\r\n\r\n\t\tencoding='latin1'\r\n\t\ttree = create_nrml_root(self, encoding=encoding)\r\n\t\tprint(etree.tostring(tree, xml_declaration=True, encoding=encoding,\r\n\t\t\t\t\t\t\tpretty_print=True))", "def toXML(self):\n definition = xml.dom.minidom.parseString(\"<network>\\n</network>\")\n nameElem = definition.createElement('name')\n nameElem.appendChild(definition.createTextNode(self.name))\n definition.documentElement.appendChild(nameElem)\n\n if self.forward['used']:\n forwardElem = definition.createElement('forward')\n forwardElem.setAttribute('mode', self.forward['type'])\n definition.documentElement.appendChild(forwardElem)\n\n bridgeElem = definition.createElement('bridge')\n bridgeElem.setAttribute('name', self.brName)\n bridgeElem.setAttribute('stp', 'on')\n bridgeElem.setAttribute('delay', '5')\n definition.documentElement.appendChild(bridgeElem)\n\n ipElem = definition.createElement('ip')\n ipElem.setAttribute('address', self.brAddr)\n ipElem.setAttribute('netmask', self.netmask)\n if self.dhcp['used']:\n dhcpElem = definition.createElement('dhcp')\n rangeElem = definition.createElement('range')\n rangeElem.setAttribute('start', self.dhcp['rangeStart'])\n rangeElem.setAttribute('end', self.dhcp['rangeEnd'])\n dhcpElem.appendChild(rangeElem)\n ipElem.appendChild(dhcpElem)\n\n definition.documentElement.appendChild(ipElem)\n\n self.xml = definition.toprettyxml()\n return self.xml", "def to_rdfxml(self):\n return rdfviews.MyndighetsforeskriftDescription(self).to_rdfxml()", "def as_xml(self):\n template = jinja2_env.get_template('episode.xml')\n\n return template.render(\n title=escape(self.title),\n url=quoteattr(self.url),\n guid=escape(self.url),\n mimetype=self.mimetype,\n length=self.length,\n date=formatdate(self.date),\n image_url=self.image,\n )", "def to_xml(self):\n\n base_elem = ET.Element(\"symmetry\")\n x_elem = ET.SubElement(base_elem, \"x\")\n x_elem.text = self._symmetry_type_to_text[self.x_symmetry]\n y_elem = ET.SubElement(base_elem, \"y\")\n y_elem.text = self._symmetry_type_to_text[self.y_symmetry]\n z_elem = ET.SubElement(base_elem, \"z\")\n z_elem.text = self._symmetry_type_to_text[self.z_symmetry]\n\n return base_elem", "def string(self):\n return etree.tostring(self.xml_tree,\n pretty_print=True,\n xml_declaration=False\n ).decode(encoding=\"utf-8\")", "def 
to_xml(self):\n disks_xml = \"\"\n for disk_obj in self._disks:\n disks_xml += disk_obj.to_xml()\n\n return disks_xml", "def __repr__(self):\n return \"<katpoint.Antenna '%s' diam=%sm at 0x%x>\" % (self.name, self.diameter, id(self))", "def to_xml(self):\n ids = {s: i for (i, s) in enumerate(self.Q())}\n\n return '\\n'.join(\n ['<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>', '<structure><type>fa</type><automaton>'] +\n [\n '<state id=\"%d\" name=\"%s\"><x>0</x><y>0</y>%s</state>' %\n ( ids[name], name, '<initial/>' if name == self.q0 else '<final/>' if name in self.F else '' ) \n for name in self.Q()\n ] + [\n '<transition><from>%d</from><to>%d</to><read>%s</read></transition>' % \n ( ids[t[0]], ids[t[2]], t[1] ) \n for t in self.transitions\n ] + \n ['</automaton></structure>']\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the grain orientation matrix.
def orientation_matrix(self): return self.orientation.orientation_matrix()
[ "def getRotationMatrix( self):", "def get_orientation(self):\n pose = self.get_pose()\n orientation = np.array(pose.r)\n return orientation", "def rotation_matrix(self):\n return np.array([self.axis_u, self.axis_v, self.axis_w])", "def _orientation_vectors(self):\n\n agent_orientations = np.empty((self.num_agents,2),dtype=np.float)\n\n for a_idx, a in enumerate(self._ctrl.agents):\n theta = a.th*2*math.pi/self.num_head_turns\n agent_orientations[a_idx] = [-1*math.sin(theta),math.cos(theta)]\n\n return agent_orientations", "def get_rotationMatrix(self):\n rot_mat = quat2mat(self.quat)\n try:\n [U, s, V] = np.linalg.svd(rot_mat)\n return np.dot(U, V)\n except:\n return np.eye(3)", "def orientation_matrix(euler_angle):\n\n # Convert from degrees to radians\n phi1 = np.deg2rad(euler_angle[0])\n Phi = np.deg2rad(euler_angle[1])\n phi2 = np.deg2rad(euler_angle[2])\n\n # Assemble orientation matrix\n M = np.zeros([3, 3])\n M[0,0] = cos(phi1)*cos(phi2) - sin(phi1)*sin(phi2)*cos(Phi)\n M[0,1] = sin(phi1)*cos(phi2) + cos(phi1)*sin(phi2)*cos(Phi)\n M[0,2] = sin(phi2)*sin(Phi)\n M[1,0] = -cos(phi1)*sin(phi2) - sin(phi1)*cos(phi2)*cos(Phi)\n M[1,1] = -sin(phi1)*sin(phi2) + cos(phi1)*cos(phi2)*cos(Phi)\n M[1,2] = cos(phi2)*sin(Phi)\n M[2,0] = sin(phi1)*sin(Phi)\n M[2,1] = -cos(phi1)*sin(Phi)\n M[2,2] = cos(Phi)\n return M", "def get_mean_orientation(self):\n mean_orientation = [dtm.mean_orientation(stream) for stream in self._data]\n return mean_orientation", "def orientation(self):\r\n tag=self.readinfo('Image Orientation Patient')\r\n \r\n if tag==None:\r\n name=None\r\n elif tag==[-0,1,0,-0,-0,-1]:\r\n name=1 #Sagittal\r\n elif tag==[-1,-0,0,-0,-1,0]:\r\n name=2 #Axial\r\n elif tag==[1,0,0,0,0,-1]:\r\n name=3 #Coronal\r\n else:\r\n name=4 #Oblique\r\n self.orient=name\r\n return", "def rotation_matrix(self) -> Tensor:\n return self.extrinsics[..., :3, :3]", "def rotationMatrix(self):\n\n # R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n # self.exteriorOrientationParameters[5])\n\n return self.__rotationMatrix", "def get_mapRotate(self):\r\n gmi = self.get_gmi()\r\n if gmi is None:\r\n return 0\r\n \r\n return gmi.get_mapRotate()", "def get_orientation(self, rotation):\r\n return self.orientations[rotation % self.max_rotations]", "def RotationMatrix_Image1(self):\r\n return Compute3DRotationMatrix(self.__relativeOrientationImage1[3], self.__relativeOrientationImage1[4],\r\n self.__relativeOrientationImage1[5])", "def _inverse_rotation_matrix(self):\n return simplify(self._parent_rotation_matrix**-1)", "def landscape(self):\n lscape = self._lscape_ref()\n return lscape", "def get_orientation_map(crystal_map):\n eulers = crystal_map.isig[1:4]\n eulers.map(_euler2axangle_signal, inplace=True, show_progressbar=False)\n orientation_map = eulers.as_signal2D((0,1))\n #Set calibration to same as signal\n x = orientation_map.axes_manager.signal_axes[0]\n y = orientation_map.axes_manager.signal_axes[1]\n x.name = 'x'\n x.scale = crystal_map.axes_manager.navigation_axes[0].scale\n x.units = 'nm'\n y.name = 'y'\n y.scale = crystal_map.axes_manager.navigation_axes[0].scale\n y.units = 'nm'\n return orientation_map", "def get_matrix(self):\n return self.mvp", "def rotation(self):\n return self[\"rotation\"]", "def rotator_to_matrix(rotator: Rotator):\n return rotation_to_matrix([rotator.pitch, rotator.yaw, rotator.roll])", "def get_rotation_matrix(theta):\n return np.array([[np.cos(theta), -1 * np.sin(theta)],\n [np.sin(theta), np.cos(theta)]])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a `Grain` instance from a DCT grain file.
def from_dct(label=1, data_dir='.'): grain_path = os.path.join(data_dir, '4_grains', 'phase_01', 'grain_%04d.mat' % label) grain_info = h5py.File(grain_path) g = Grain(label, Orientation.from_rodrigues(grain_info['R_vector'].value)) g.center = grain_info['center'].value # add spatial representation of the grain if reconstruction is available grain_map_path = os.path.join(data_dir, '5_reconstruction', 'phase_01_vol.mat') if os.path.exists(grain_map_path): with h5py.File(grain_map_path, 'r') as f: # because how matlab writes the data, we need to swap X and Z axes in the DCT volume vol = f['vol'].value.transpose(2, 1, 0) from scipy import ndimage grain_data = vol[ndimage.find_objects(vol == label)[0]] g.volume = ndimage.measurements.sum(vol == label) # create the vtk representation of the grain g.add_vtk_mesh(grain_data, contour=False) return g
[ "def from_grain_file(grain_file_path, col_id=0, col_phi1=1, col_phi=2, col_phi2=3, col_x=4, col_y=5, col_z=None, col_volume=None):\n # get the file name without extension\n name = os.path.splitext(os.path.basename(grain_file_path))[0]\n print('creating microstructure %s' % name)\n micro = Microstructure(name=name)\n\n # read grain infos from the grain file\n grains_EBSD = np.genfromtxt(grain_file_path)\n for i in range(len(grains_EBSD)):\n o = Orientation.from_euler([grains_EBSD[i, col_phi1], grains_EBSD[i, col_phi], grains_EBSD[i, col_phi2]])\n g = Grain(int(grains_EBSD[i, col_id]), o)\n z = grains_EBSD[i, col_z] if col_z else 0.\n g.position = np.array([grains_EBSD[i, col_x], grains_EBSD[i, col_y], z])\n if col_volume:\n g.volume = grains_EBSD[i, col_volume]\n micro.grains.append(g)\n return micro", "def read_grain_cat(grain_cat_file=grain_cat_file):\n\n col_names = ['scan_file', 'xpos', 'ypos']\n\n grain_cat = pd.read_table(grain_cat_file, sep=',', header=0,\n skipinitialspace=True, na_filter=False, names=col_names)\n\n grain_cat['particle'] = grain_cat.index + 1\n\n return grain_cat", "def add_grain_file(self, filename):\r\n f = open(filename, 'r')\r\n fin = f.read()\r\n grains = fin.split(\"grain,\")\r\n for i in grains:\r\n grain = i.split(\",\")\r\n if grain[0] != '':\r\n self.add_grain(float(grain[0]), float(grain[1]))\r\n f.close()", "def load(cls, file):\n\n file = str(file)\n\n if 'geom.tab' in file:\n geom_file = file\n rgram_file = file.replace('geom.tab', 'rgram.img')\n elif 'rgram.img' in file:\n geom_file = file.replace('rgram.img', 'geom.tab')\n rgram_file = file\n else:\n raise ValueError('Not a SHARAD file.')\n\n orbit_regex = re.compile(r's_(\\d{6})(\\d{2})_(?:geom.tab|rgram.img)')\n orbit, observation = map(int, orbit_regex.findall(file)[0])\n\n try:\n geom = SharadGeom.load(geom_file)\n except FileNotFoundError:\n geom = None\n\n try:\n rgram = SharadRgram.load(rgram_file)\n except FileNotFoundError:\n rgram = None\n\n out = cls()\n out.G = geom\n out.DM = rgram\n out.file = file\n out.orbit = orbit\n out.observation = observation\n\n return out", "def __readGrain(self, offset):\n sectorOffset = StreamVmdkMedia.__byteOffsetToSectorOffset(offset) #translate the offset in bytes to an offset in sectors\n grainOffset = StreamVmdkMedia.__sectorOffsetToGrainOffset(sectorOffset)\n \n if grainOffset == len(self.__fullGT):\n return self.__incompleteWrittenGrain + StreamVmdkMedia.__padToGrain(self.__incompleteWrittenGrain)\n fileLocation = self.__fullGT[ grainOffset ] * SECTOR_SIZE#get the location in the file where we can find the grain\n \n if fileLocation:\n self.__file.seek( fileLocation + UINT64_BYTE_SIZE)#set the file position to point to the data-length byte of the marker\n compressedLength = struct.unpack(\"=I\", self.__file.read(UINT32_BYTE_SIZE))[0]#extract the required number of bytes\n compressedData = self.__file.read( compressedLength )#read the compressed data\n uncompressedData = zlib.decompress(compressedData)\n if len(uncompressedData) != GRAIN_SIZE:\n logging.critical(\"len(Uncompressed grain) != GRAIN_SIZE\")\n raise VMDKStreamException(\"invalid/corrupted input file! 
(incorrect grain size)\")\n return uncompressedData#and since we still need to read at least a whole grain we can add all uncompressed data\n else:#null block: add one whole grain of nulls\n return StreamVmdkMedia.__zeroGrain", "def from_file(\n cls,\n filename_0: str,\n miller_0: ArrayLike,\n filename_1: Optional[str] = None,\n miller_1: Optional[ArrayLike] = None,\n mirror_x: bool = False,\n mirror_y: bool = False,\n mirror_z: bool = False,\n average_lattice: bool = False,\n vacuum: Optional[float] = None,\n translation_vec: ArrayLike = [0.0, 0.0, 0.0],\n merge_tol: Optional[float] = None,\n reconstruction: Optional[Callable[[Grain, Site], bool]] = None,\n bonds: Optional[Dict[Sequence[SpeciesLike], float]] = None,\n ftol: float = 0.1,\n tol: float = 0.1,\n max_broken_bonds: int = 0,\n symmetrize: bool = False,\n repair: bool = False,\n bulk_repeats: int = 1,\n thickness: Optional[float] = None,\n hkl_thickness: Optional[float] = None,\n orthogonal_c: bool = False,\n relative_to_bulk_0: bool = False,\n relative_to_bulk_1: bool = False,\n ) -> \"GrainBoundaryGenerator\":\n bulk_0 = Structure.from_file(filename_0)\n bulk_1 = None if filename_1 is None else Structure.from_file(filename_1)\n return cls(\n bulk_0,\n miller_0,\n bulk_1,\n miller_1,\n mirror_x=mirror_x,\n mirror_y=mirror_y,\n mirror_z=mirror_z,\n average_lattice=average_lattice,\n vacuum=vacuum,\n translation_vec=translation_vec,\n merge_tol=merge_tol,\n reconstruction=reconstruction,\n bonds=bonds,\n ftol=ftol,\n tol=tol,\n max_broken_bonds=max_broken_bonds,\n symmetrize=symmetrize,\n repair=repair,\n bulk_repeats=bulk_repeats,\n thickness=thickness,\n hkl_thickness=hkl_thickness,\n orthogonal_c=orthogonal_c,\n relative_to_bulk_0=relative_to_bulk_0,\n relative_to_bulk_1=relative_to_bulk_1,\n )", "def from_dct(data_dir='.', grain_file='index.mat', vol_file='phase_01_vol.mat', mask_file='volume_mask.mat',\n use_dct_path=True, verbose=True):\n if data_dir == '.':\n data_dir = os.getcwd()\n if data_dir.endswith(os.sep):\n data_dir = data_dir[:-1]\n scan = data_dir.split(os.sep)[-1]\n print('creating microstructure for DCT scan %s' % scan)\n micro = Microstructure(name=scan)\n micro.data_dir = data_dir\n if use_dct_path:\n index_path = os.path.join(data_dir, '4_grains', 'phase_01', grain_file)\n else:\n index_path = os.path.join(data_dir, grain_file)\n print(index_path)\n if not os.path.exists(index_path):\n raise ValueError('%s not found, please specify a valid path to the grain file.' 
% index_path)\n return None\n from scipy.io import loadmat\n index = loadmat(index_path)\n micro.voxel_size = index['cryst'][0][0][25][0][0]\n # grab the crystal lattice\n lattice_params = index['cryst'][0][0][3][0]\n sym = Symmetry.from_string(index['cryst'][0][0][7][0])\n print('creating crystal lattice {} ({}) with parameters {}'.format(index['cryst'][0][0][0][0], sym, lattice_params))\n lattice_params[:3] /= 10 # angstrom to nm\n lattice = Lattice.from_parameters(*lattice_params, symmetry=sym)\n micro.set_lattice(lattice)\n # add all grains to the microstructure\n for i in range(len(index['grain'][0])):\n gid = index['grain'][0][i][0][0][0][0][0]\n rod = index['grain'][0][i][0][0][3][0]\n g = Grain(gid, Orientation.from_rodrigues(rod))\n g.center = index['grain'][0][i][0][0][15][0]\n micro.grains.append(g)\n\n # load the grain map if available\n if use_dct_path:\n grain_map_path = os.path.join(data_dir, '5_reconstruction', vol_file)\n else:\n grain_map_path = os.path.join(data_dir, vol_file)\n if os.path.exists(grain_map_path):\n with h5py.File(grain_map_path, 'r') as f:\n # because how matlab writes the data, we need to swap X and Z axes in the DCT volume\n micro.grain_map = f['vol'][()].transpose(2, 1, 0)\n if verbose:\n print('loaded grain ids volume with shape: {}'.format(micro.grain_map.shape))\n # load the mask if available\n if use_dct_path:\n mask_path = os.path.join(data_dir, '5_reconstruction', mask_file)\n else:\n mask_path = os.path.join(data_dir, mask_file)\n if os.path.exists(mask_path):\n try:\n with h5py.File(mask_path, 'r') as f:\n micro.mask = f['vol'][()].transpose(2, 1, 0).astype(np.uint8)\n except:\n # fallback on matlab format\n micro.mask = loadmat(mask_path)['vol']\n if verbose:\n print('loaded mask volume with shape: {}'.format(micro.mask.shape))\n return micro", "def read_txt_grains(fname):\n\n # Note: (21) fields named below with an underscore are not yet used\n #\n # Fields from grains.out header:\n \"\"\"grain ID completeness chi2\n xi[0] xi[1] xi[2]\n tVec_c[0] tVec_c[1] tVec_c[2]\n vInv_s[0] vInv_s[1] vInv_s[2] vInv_s[4]*sqrt(2) vInv_s[5]*sqrt(2) vInv_s[6]*sqrt(2)\n ln(V[0,0]) ln(V[1,1]) ln(V[2,2]) ln(V[1,2]) ln(V[0,2]) ln(V[0,1])\"\"\"\n\n # Use shortened names in construction of numpy data type.\n\n d = {'names': ('id', 'completeness', 'chisq',\n 'ori_0', 'ori_1', 'ori_2',\n 'cen_0', 'cen_1', 'cen_2',\n 'vi0', 'vi1', 'vi2', 'vi3', 'vi4', 'vi5',\n 'lnV00', 'lnV11', 'lnV22', 'lnV12', 'lnV02', 'lnV01'),\n 'formats': ('i4',) + 20*('f4',)}\n\n return np.loadtxt(fname, dtype=d)", "def from_file(\n cls,\n map_file: str,\n opening_angle: float,\n quantity: str,\n dir_in: str,\n convert_unit: bool = True,\n ) -> \"SkyMap\":\n assert map_file, SkyNamasterWarning(\"There is no file being pointed at\")\n\n file_extension = map_file.split(\".\")[-1]\n if file_extension == \"h5\":\n map_df = pd.read_hdf(map_file, key=\"df\")\n return cls.from_dataframe(\n map_df, opening_angle, quantity, dir_in, map_file, convert_unit\n )\n elif file_extension == \"fits\":\n map_array = hp.read_map(map_file)\n return cls.from_array(\n map_array, opening_angle, quantity, dir_in, map_file\n )\n elif file_extension == \"npy\":\n map_array = np.load(map_file)\n return cls.from_array(\n map_array, opening_angle, quantity, dir_in, map_file\n )", "def load_grain(grains, k):\n grain = -np.ones(dims)\n ind = grains[k][0]-1\n [x, y, z] = np.unravel_index(ind, dims, order='F')\n val = grains[k][1]\n grain[y,x,z] = val\n verts, faces = measure.marching_cubes_classic(grain, 0, spacing=(1, 1, 
1))\n return verts, faces", "def create_from_gaf(self, file, **args):\n return self.create_from_file(file, fmt='gaf', **args)", "def fromFile(cls, file_name):\n try:\n ds = gdal.Open(file_name)\n return Raster(ds)\n except (RuntimeError, TypeError, NameError) as e:\n raise e", "def from_neper(neper_file_path):\n neper_file = neper_file_path.split(os.sep)[-1]\n print('creating microstructure from Neper tesselation %s' % neper_file)\n name, ext = os.path.splitext(neper_file)\n print(name, ext)\n assert ext == '.tesr' # assuming raster tesselation\n micro = Microstructure(name=name)\n with open(neper_file_path, 'r', encoding='latin-1') as f:\n line = f.readline() # ***tesr\n # look for **general\n while True:\n line = f.readline().strip() # get rid of unnecessary spaces\n if line.startswith('**general'):\n break\n dim = f.readline().strip()\n print(dim)\n dims = np.array(f.readline().split()).astype(int).tolist()\n print(dims)\n voxel_size = np.array(f.readline().split()).astype(float).tolist()\n print(voxel_size)\n # look for **cell\n while True:\n line = f.readline().strip()\n if line.startswith('**cell'):\n break\n n = int(f.readline().strip())\n print('microstructure contains %d grains' % n)\n f.readline() # *id\n grain_ids = []\n # look for *ori\n while True:\n line = f.readline().strip()\n if line.startswith('*ori'):\n break\n else:\n grain_ids.extend(np.array(line.split()).astype(int).tolist())\n print('grain ids are:', grain_ids)\n oridescriptor = f.readline().strip() # must be euler-bunge:passive\n if oridescriptor != 'euler-bunge:passive':\n print('Wrong orientation descriptor: %s, must be euler-bunge:passive' % oridescriptor)\n for i in range(n):\n euler_angles = np.array(f.readline().split()).astype(float).tolist()\n print('adding grain %d' % grain_ids[i])\n micro.grains.append(Grain(grain_ids[i], Orientation.from_euler(euler_angles)))\n # look for **data\n while True:\n line = f.readline().strip()\n if line.startswith('**data'):\n break\n print(f.tell())\n print('reading data from byte %d' % f.tell())\n data = np.fromfile(f, dtype=np.uint16)[:-4] # leave out the last 4 values\n print(data.shape)\n assert np.prod(dims) == data.shape[0]\n micro.set_grain_map(data.reshape(dims[::-1]).transpose(2, 1, 0), voxel_size[0]) # swap X/Z axes\n micro.recompute_grain_centers()\n print('done')\n return micro", "def from_h5(file_path):\n with h5py.File(file_path, 'r') as f:\n micro = Microstructure(name=f.attrs['microstructure_name'])\n if 'symmetry' in f['EnsembleData/CrystalStructure'].attrs:\n sym = f['EnsembleData/CrystalStructure'].attrs['symmetry']\n parameters = f['EnsembleData/CrystalStructure/LatticeParameters'][()]\n micro.set_lattice(Lattice.from_symmetry(Symmetry.from_string(sym), parameters))\n if 'data_dir' in f.attrs:\n micro.data_dir = f.attrs['data_dir']\n # load feature data\n if 'R_vectors' in f['FeatureData']:\n print('some grains')\n avg_rods = f['FeatureData/R_vectors'][()]\n print(avg_rods.shape)\n if 'grain_ids' in f['FeatureData']:\n grain_ids = f['FeatureData/grain_ids'][()]\n else:\n grain_ids = range(1, 1 + avg_rods.shape[0])\n if 'centers' in f['FeatureData']:\n centers = f['FeatureData/centers'][()]\n else:\n centers = np.zeros_like(avg_rods)\n for i in range(avg_rods.shape[0]):\n g = Grain(grain_ids[i], Orientation.from_rodrigues(avg_rods[i, :]))\n g.center = centers[i]\n micro.grains.append(g)\n # load cell data\n if 'grain_ids' in f['CellData']:\n micro.grain_map = f['CellData/grain_ids'][()]\n if 'voxel_size' in f['CellData/grain_ids'].attrs:\n micro.voxel_size 
= f['CellData/grain_ids'].attrs['voxel_size']\n if 'mask' in f['CellData']:\n micro.mask = f['CellData/mask'][()]\n if 'voxel_size' in f['CellData/mask'].attrs:\n micro.voxel_size = f['CellData/mask'].attrs['voxel_size']\n return micro", "def to_xml(self, doc, file_name=None):\n grain = doc.createElement('Grain')\n grain_id = doc.createElement('Id')\n grain_id_text = doc.createTextNode('%s' % self.id)\n grain_id.appendChild(grain_id_text)\n grain.appendChild(grain_id)\n grain.appendChild(self.orientation.to_xml(doc))\n grain_position = doc.createElement('Position')\n grain_position_x = doc.createElement('X')\n grain_position.appendChild(grain_position_x)\n grain_position_x_text = doc.createTextNode('%f' % self.center[0])\n grain_position_x.appendChild(grain_position_x_text)\n grain_position_y = doc.createElement('Y')\n grain_position.appendChild(grain_position_y)\n grain_position_y_text = doc.createTextNode('%f' % self.center[1])\n grain_position_y.appendChild(grain_position_y_text)\n grain_position_z = doc.createElement('Z')\n grain_position.appendChild(grain_position_z)\n grain_position_z_text = doc.createTextNode('%f' % self.center[2])\n grain_position_z.appendChild(grain_position_z_text)\n grain.appendChild(grain_position)\n grain_mesh = doc.createElement('Mesh')\n if not file_name:\n file_name = self.vtk_file_name()\n grain_mesh_text = doc.createTextNode('%s' % file_name)\n grain_mesh.appendChild(grain_mesh_text)\n grain.appendChild(grain_mesh)\n return grain", "def fromSegyFile(cls, filename): \n filename = transform_separator(filename)\n\n traceData, volumeHeader, traceHeader = readSegy(filename, endian = '>', rev = None, dsf = None)\n\n \n return cls(traceData, volumeHeader, traceHeader)", "def from_file(cls, filename):\n ring_class = cls() # initialize class\n ring_class.read_ring_file(filename)\n return ring_class # return class as it is a constructor", "def _from_File(self, fname):\n\n # load_seds - load wavelength and seds\n if self._get_type(fname) == \"fits\":\n with pyfits.open(fname) as f:\n self.seds = f[0].data[:-1]\n self.lamb = f[0].data[-1]\n self.grid = Table(fname)\n\n elif self._get_type(fname) == \"hdf\":\n with HDFStore(fname, mode=\"r\") as s:\n self.seds = s[\"/seds\"].read()\n self.lamb = s[\"/lamb\"].read()\n try:\n self.cov_diag = s[\"/covdiag\"].read()\n except Exception:\n self.cov_diag = None\n try:\n self.cov_offdiag = s[\"/covoffdiag\"].read()\n except Exception:\n self.cov_offdiag = None\n self.grid = Table(fname, tablename=\"/grid\")\n\n self._header = self.grid.header", "def __init__(self, grains=None, filename=None, csym=None, ngrain=100,\n cdim=[1.,1.,1.], cang=[90.,90.,90.], ssym=False, epf=None):\n # The grain aggregte can be given either through a file or #\n # passing an array of them to the class directly. #\n # either grains or filename #\n # if none of them is given, a 500-grains file is generated #\n # and returns its grains to the global gr variable. 
#\n\n if grains==None and filename==None and epf==None:\n print(\" ****************************** \")\n print(\" Since no argument is passed,\")\n print(\" 100 random grains are created\")\n print(\" ****************************** \\n\")\n a = re(ngrain=ngrain)\n gr = np.array(a.euler).transpose()\n gr = np.array([gr[1],gr[2],gr[3]]).transpose()\n temp = []\n for i in range(len(gr)):\n temp.append([gr[i][0],gr[i][1],gr[i][2],0.01])\n self.gr = np.array(temp)\n\n self.epf = epf # global\n\n if grains!=None:\n self.gr = np.array(grains)\n elif filename!=None:\n self.gr = np.genfromtxt(fname=filename,skiprows=4)\n pass\n elif epf!=None: # None is the default for epf\n \"\"\"\n experimental pole figures..\n # available format:\n - UXD\n - steglich\n - bruker\n - epf*\n \"\"\"\n if type(epf).__name__=='list': self.epf_fn = epf\n elif type(epf).__name__=='str': self.epf_fn = [epf]\n elif epf==True:\n fn = [] # list of file names\n print('type the experimental pole figure file names')\n print(\"To finish input, press enter\")\n while True:\n dum = input(\">>> \")\n if len(dum)==0: break\n fn.append(dum)\n pass\n self.epf_fn = fn\n pass\n else: raise IOError('Unexpected epf type found')\n\n ## check if the file name is correct ##\n for i in range(len(self.epf_fn)):\n if not(os.path.isfile(self.epf_fn[i])):\n raise IOError(\"Could not find %s\"%self.epf_fn[i])\n pass\n ## --------------------------------- ##\n\n ## POLE FIGURE MODE --------------------------------------\n print(\"Type the experimental polfe figure mode\")\n print(\"Available options:\", end=' ') #continuation\n print(\"bruker, steglich, epf (default: %s)\"%'epf')\n epf_mode = input(\" >>>\" )\n if len(epf_mode)==0:\n epf_mode='steglich'\n pass\n ##---------------------------------------------------------\n\n self.grid = []; self.hkl = []\n ## more than one pf can be included.\n npole_per_file = []\n if epf_mode=='epf': self.max_khi = [] #Available only for epf_mode yet.\n\n for i in range(len(self.epf_fn)):\n if epf_mode=='epf':\n data, maxk, hkl = epfformat(\n mode=epf_mode,\n filename=self.epf_fn[i]\n )\n # one file may include multiple poles\n for i in range(len(data)):\n self.grid.append(data[i])\n self.max_khi.append(maxk[i])\n self.hkl.append(hkl)\n npole_per_file.append(len(data)) # of pole per a file\n\n else:\n data = epfformat(\n mode=epf_mode,\n filename=self.epf_fn[i]\n )\n self.grid.append(\n data\n )\n self.hkl.append(None)\n self.grid = np.array(self.grid)\n self.epf_mode=epf_mode\n\n ## EXPERIMENTAL POLE FIGURE\n ## ---------------------------------------------------------- ##\n ## POLE FIGURES BINNINGED FROM THE POLYCRYSTALLINE AGGREGATES ##\n\n if epf==None:\n dat = self.gr.transpose()\n phi1 = dat[0]; phi = dat[1]; phi2 = dat[2]\n\n print('phi1: %i ~ %i'%(\n int(round(min(dat[0]/90.)))*90, int(\n round(max(dat[0]/90.)))*90))\n print('phi: %i ~ %i'%(\n int(round(min(dat[1]/90.)))*90, int(\n round(max(dat[1]/90.)))*90))\n print('phi2: %i ~ %i'%(\n int(round(min(dat[2]/90.)))*90, int(\n round(max(dat[2]/90.)))*90))\n ph1min, ph1max= int(\n round(min(dat[0]/90.)))*90, int(\n round(max(dat[0]/90.)))*90\n phmin, phmax = int(\n round(min(dat[1]/90.)))*90, int(\n round(max(dat[1]/90.)))*90\n ph2min, ph2max= int(\n round(min(dat[2]/90.)))*90, int(\n round(max(dat[2]/90.)))*90\n\n ## symmetric multiplication over self.gr is performed unless ph1max==360\n \"\"\"\n Sample symmetry application is pending,\n because it is done over rve (refer to cmb.py)\n \"\"\"\n # nrot = int(round(360./ph1max))\n # if ssym==True:\n # if 
nrot==4: self.gr = planar_sym(gr=self.gr, nrot=2)\n # else: raise IOError, \"not ready for other nrot than 4\"\n # pass\n\n ### environments global variables\n #1 symmetry\n self.csym = csym\n self.ngr = len(self.gr)\n self.cdim = cdim\n self.cang = cang\n pass\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of phases in this microstructure. For the moment only one phase is supported, so this function simply returns 1.
def get_number_of_phases(self): return 1
[ "def get_num_phases(self) -> int:\n return self._num_phases", "def get_num_timesteps(self):\n return len(self.dm[0])", "def num_steps(self):\n return len(self.voltage_pairs)", "def getNumFrames(self):\n timestep_values = self.readTimesteps() # Takes the values of all timesteps\n return len(timestep_values)", "def stimuliLength(self):\r\n return len(self.stimuli)", "def numeroElements(self):\n count=0\n for c in self._components:\n count+=1\n return count", "def num_traits(self) -> Natural:\n return len(self.trait_cards)", "def getNumOrbitals(self):\n return self.config['System']['n_orbitals']", "def num_parts(self):\n\n return self._num_parts", "def injection_plug_length(self):\n injection_plug_length = (self.pressure * self.diameter**2 * self.duration) / (32 * self.viscosity * self.total_length * 10**2)\n return injection_plug_length", "def _get_count(self) -> \"size_t\" :\n return _core.Palettes__get_count(self)", "def n_tr(self) -> int:\n if self.atoms is None or not hasattr(self.atoms, \"are_linear\"):\n raise ValueError(\n \"Could not determine the number of translations\"\n \"and rotations. Atoms must be set\"\n )\n\n return 5 if self.atoms.are_linear() else 6", "def getCount():\n\t\treturn RigidBody.__count", "def get_num_analogs(self):\n return self.shape[1]", "def number_of_reflection_hyperplanes(self):\n from sage.rings.all import ZZ\n return ZZ.sum(codeg+1 for codeg in self.codegrees())", "def num_elements(obj: SynapseIdModel) -> int:\n return obj.nplast * obj.nstate**2 + obj.nstate", "def num_pieces(self):\n delta_phi = self.base_piece.phi_nom_max - self.base_piece.phi_nom_min\n return 2*np.pi*self.cg.sin_alpha / delta_phi", "def get_num_tracks(self):\n # type: () -> Tuple[Union[float, int], ...]\n return self._num_tracks", "def count (self):\n total = 1\n for dep in self.deps:\n total += dep.count()\n return total" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of grains in this microstructure.
def get_number_of_grains(self): return len(self.grains)
[ "def count_asteroids(self):\n count = 0\n for obj in self.game_objects:\n if type(obj) == Asteroid:\n count += 1\n return count", "def getNumGenes(self):\n return self.data.getNumGenes()", "def nb_ring(self):\n return len(self.__rings)", "def get_current_number_sharks(self) -> int:\n number_sharks = 0\n for coord, animal in self._grid.items():\n if animal.animal_type == Animal.Shark:\n number_sharks += 1\n return number_sharks", "def numberOfNodesInOutline (self):\n \n c = g.top() ; n = 0\n for p in c.allNodes_iter():\n n += 1\n return n", "def ngens(self):\n return self.base_field().ngens() + 1", "def number_of_rings(mol: SmallMolecule) -> int:\n return num_rings(to_mol(mol))", "def get_level_count(wsi) -> int:\n return wsi.resolutions[\"level_count\"] # type: ignore", "def get_garden_area(self, in_raster):\n count = 0\n hist = d.hist(in_raster)\n if s.GARDEN_ID in hist:\n count = hist[s.GARDEN_ID]\n\n return count", "def get_num_species(self):\n return len(self.get_all_species())", "def total (self):\n regions = self.regions\n chrs = regions.keys()\n chrs.sort()\n x = 0\n for chrom in chrs:\n x += len(regions[chrom])\n return x", "def _get_count(self) -> \"size_t\" :\n return _core.FavoriteMaterials__get_count(self)", "def count_levels(self):\r\n lcount = 0\r\n rcount = 0\r\n if self.left:\r\n lcount = self.left.count_levels()\r\n if self.right:\r\n rcount = self.right.count_levels()\r\n return 1 + max(lcount, rcount)", "def _get_count(self) -> \"size_t\" :\n return _core.Materials__get_count(self)", "def population(self):\n return sum([len(s) for s in self.__species])", "def total_num_weighins(self):\n num_weighins_hash = {}\n for key in self.graph.nodes():\n num_weighins_hash[key] = int(float(self.candidates[key][\"weighins\"]))\n\n weighins_nodes = self._annotate_graph(num_weighins_hash, \"weighins\")", "def _get_count(self) -> \"size_t\" :\n return _core.Workspaces__get_count(self)", "def num_sections(self):\r\n return self['e_shnum']", "def __len__(self):\n return len(self.__class__.all_rectangles)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the crystallographic lattice associated with this microstructure.
def set_lattice(self, lattice): self._lattice = lattice
[ "def generate_lattice(self, verbose=False):\n if not self._lattice:\n lat = StrictOrders().get_orders(xrange(1, self.set_n + 1), verbose)\n self._lattice = lat", "def getLattice() :\n lattice = [getElem('loop'),getElem('quad'),getElem('drift'),getElem('quad'),getElem('drift')]\n lattice[3].Kx = -lattice[3].Kx\n return lattice", "def reciprocal_lattice_crystallographic(self) -> \"Lattice\":\n return Lattice(self.reciprocal_lattice.matrix / (2 * np.pi))", "def _add_lattice(self, lattice, cuba_keys):\n name = lattice.name\n lattice_root = self._root.lattice\n\n group = tables.Group(lattice_root, name=name, new=True)\n h5_lattice = H5Lattice.create_new(\n group, lattice.primitive_cell, lattice.size, lattice.origin)\n h5_lattice.data = lattice.data\n\n if cuba_keys is not None:\n for item in lattice.iter(item_type=CUBA.NODE):\n item.data = DataContainer(\n {key: item.data[key] for key in item.data\n if key in cuba_keys[CUBA.NODE]})\n h5_lattice.update([item])\n else:\n h5_lattice.update(lattice.iter(item_type=CUBA.NODE))", "def lattice(before=bool, exclusive=\"string\", after=bool, dualBase=bool, freezeMapping=bool, commonParent=bool, removeTweaks=bool, includeHiddenSelections=bool, frontOfChain=bool, outsideFalloffDistance=float, prune=bool, objectCentered=bool, geometryIndices=bool, split=bool, divisions=int, geometry=\"string\", name=\"string\", latticeReset=bool, scale=\"string\", remove=bool, parallel=bool, outsideLattice=int, ignoreSelected=bool, rotation=int, afterReference=bool, ldivisions=int, deformerTools=bool, position=\"string\"):\n pass", "def get_lattice(self):\n return self._lattice", "def initialize(self):\n self.lattice = 2 * np.random.randint(2, size=(self.N, self.N)) - 1", "def updateLattice(lattice):\n z=0.\n for i in range(len(lattice)):\n lattice[i]['z']=z\n z=z+lattice[i]['length']", "def drawLattice(self):\n for row in range(self.gameState.sizeBoard):\n for col in range(self.gameState.sizeBoard):\n hexagon = self.hexagon(row, col)\n self.background_lattice.append(hexagon)", "def getLatticeType(self):\n return self.config['System']['lattice_type']", "def lattice(self) -> Lattice:\n vacuum = self.vacuum if self.vacuum is not None else self.translation_vec[2]\n height = (\n self.grain_0.thickness\n + self.grain_1.thickness\n + self.translation_vec[2]\n + vacuum\n )\n lattice = self.grain_0.lattice.matrix.copy()\n lattice[2] *= height / lattice[2, 2]\n if self.average_lattice:\n lattice[0] /= self.grain_0.lattice.a\n lattice[0] *= (self.grain_0.lattice.a + self.grain_1.lattice.a) / 2\n lattice[1] /= self.grain_0.lattice.b\n lattice[1] *= (self.grain_0.lattice.b + self.grain_1.lattice.b) / 2\n return Lattice(lattice)", "def lattice(self, perturb):\n\n # Check if perturbation is below maximum allowed. If not, default to maximum perturbation.\n if perturb > 1:\n print('Warning: Random perturbation must not exceed 1. Setting perturb = 1.')\n perturb = 1 # Maximum perturbation\n\n print('Initializing particles with maximum random perturbation of {} times the lattice spacing.'.format(\n perturb * 0.5))\n\n # Determining number of particles per side of simple cubic lattice\n part_per_side = self.total_num_ptcls ** (1. / 3.) # Number of particles per side of cubic lattice\n\n # Check if total number of particles is a perfect cube, if not, place more than the requested amount\n if round(part_per_side) ** 3 != self.total_num_ptcls:\n part_per_side = np.ceil(self.total_num_ptcls ** (1. 
/ 3.))\n print('\\nWARNING: Total number of particles requested is not a perfect cube.')\n print('Initializing with {} particles.'.format(int(part_per_side ** 3)))\n\n dx_lattice = self.pbox_lengths[0] / (self.total_num_ptcls ** (1. / 3.)) # Lattice spacing\n dz_lattice = self.pbox_lengths[1] / (self.total_num_ptcls ** (1. / 3.)) # Lattice spacing\n dy_lattice = self.pbox_lengths[2] / (self.total_num_ptcls ** (1. / 3.)) # Lattice spacing\n\n # Create x, y, and z position arrays\n x = np.arange(0, self.pbox_lengths[0], dx_lattice) + 0.5 * dx_lattice\n y = np.arange(0, self.pbox_lengths[1], dy_lattice) + 0.5 * dy_lattice\n z = np.arange(0, self.pbox_lengths[2], dz_lattice) + 0.5 * dz_lattice\n\n # Create a lattice with appropriate x, y, and z values based on arange\n X, Y, Z = np.meshgrid(x, y, z)\n\n # Perturb lattice\n X += self.rnd_gen.uniform(-0.5, 0.5, np.shape(X)) * perturb * dx_lattice\n Y += self.rnd_gen.uniform(-0.5, 0.5, np.shape(Y)) * perturb * dy_lattice\n Z += self.rnd_gen.uniform(-0.5, 0.5, np.shape(Z)) * perturb * dz_lattice\n\n # Flatten the meshgrid values for plotting and computation\n self.pos[:, 0] = X.ravel() + self.box_lengths[0]/2 - self.pbox_lengths[0]/2\n self.pos[:, 1] = Y.ravel() + self.box_lengths[1]/2 - self.pbox_lengths[1]/2\n self.pos[:, 2] = Z.ravel() + self.box_lengths[2]/2 - self.pbox_lengths[2]/2", "def addLatticeNodes(self,nodes, lattice = None):\n\t\tself.lattice = lattice\n\t\tself.nodes += nodes\n\t\tfor node in self.nodes:\n\t\t\terrCntrl = self._getInstanceOfErrorController()\n\t\t\terrCntrl.setName(\"ErrCntrl:\" + errCntrl.getShortTypeName() + \":\" + node.getName())\n\t\t\terrCntrl.setLattice(lattice)\n\t\t\terrCntrl.setOneNodeParent(node)\n\t\t\tself.error_controllers.append(errCntrl)\n\t\t\tself.node_to_cntrl_dict[node] = errCntrl\n\t\tself.updateErrorParameters()", "def set_cyl(self, co):\n self.set_kind('cyl')\n self.set_coords(co)\n return self", "def lattice(x1,x2,y1,y2, axis_ignore=True, setting_print_flag=False):\n\n # real coordinate from virtual\n [r_x1, r_x2, r_y1, r_y2] = map(lambda x: x * unit_length, [x1, x2, y1, y2])\n\n output = \"\"\n if setting_print_flag:\n output += r\"{p}{p} require tikzsetting2015m2a\" + \"\\n\"\n output += r\"{p}\\setmyintervalwidth{{1.5}}\" + \"\\n\"\n output += r\"{p}\\begin{{tikzpicture}}[scale=1.5]\" + \"\\n\"\n\n x_index_max = x2 - x1 - 1\n x_indices = \"1,2,...\"\n if axis_ignore and (x1 < 0):\n i = -x1 # =abs(x1)\n x_indices += \",{0},{1},{2},...\".format(i-1, i+1, i+2)\n x_indices += \",{0}\".format(x_index_max)\n\n y_index_max = y2 - y1 - 1\n y_indices = \"1,2,...\"\n if axis_ignore and (y1 < 0):\n i = -y1 # =abs(y1)\n y_indices += \",{0},{1},{2},...\".format(i-1, i+1, i+2)\n y_indices += \",{0}\".format(y_index_max)\n\n output += tikz_indent + r\"{p} lattice\" + \"\\n\"\n output += tikz_indent + r\"\\foreach \\k in {{{x_indices}}}\" + \"\\n\"\n output += tikz_indent + r\" \\draw[dash pattern=on \\pgflinewidth off \\myintervalwidth, dash phase=.5\\pgflinewidth]%\" + \"\\n\"\n output += tikz_indent + r\" ($({r_x1},{r_y1})+ \\k*({unit_x})$) -- ($({r_x1},{r_y2})+ \\k*({unit_x})$);\" + \"\\n\"\n output += tikz_indent + r\"\\foreach \\k in {{{y_indices}}}\" + \"\\n\"\n output += tikz_indent + r\" \\draw[dash pattern=on \\pgflinewidth off \\myintervalwidth, dash phase=.5\\pgflinewidth]%\" + \"\\n\"\n output += tikz_indent + r\" ($({r_x1},{r_y1})+ \\k*({unit_y})$) -- ($({r_x2},{r_y1})+ \\k*({unit_y})$);\"\n print(output.format(x_indices=x_indices, y_indices=y_indices,\n r_x1=r_x1, r_x2=r_x2, r_y1=r_y1, 
r_y2=r_y2,\n unit_x = unit_x, unit_y = unit_y, p = \"%\")\n )", "def make_lattice(self, latt_type = 'cubic', lat_parms):\n\n if latt_type = 'cubic':\n lx, ly, lz = lat_parms\n latt = {}\n latt['box'] = ['cubic', lx, ly, lz]\n latt['xyzs'] = []\n\n # box dimensions in lattice units\n\n # layer number\n for iz in range(lz):\n # layer structure\n for iy in range(ly):\n for ix in range(lx):\n latt['xyzs'].append([ix, iy, iz,1])\n\n elif latt_type = 'bcc':\n lx, ly, lz = lat_parms\n latt = {}\n latt['box'] = ['bcc', lx, ly, lz]\n latt['xyzs'] = []\n\n # box dimensions in lattice units\n\n # layer number\n for iz in range(lz):\n # layer structure\n for iy in range(ly):\n for ix in range(lx):\n if ix + 0.5 <= (lx - 1) and iy + 0.5 <= (ly - 1) and iz + 0.5 <= (lz - 1):\n latt['xyzs'].append([ix + 0.5, iy + 0.5, iz + 0.5, 1])\n latt['xyzs'].append([1 * ix, 1 * iy, 1 * iz, 1])\n\n\n\n elif latt_type = 'fcc':\n lx, ly, lz = lat_parms\n\n latt = {}\n latt['nat'] = lx * ly * lz\n latt['box'] = ['fcc', 2 * lx, ly, lz]\n latt['xyzs'] = []\n\n # box dimensions in lattice units\n\n # layer number\n for iz in range(lz):\n # layer structure\n for iy in range(ly):\n for ix in range(lx):\n rx = 2 * ix + (iy + iz) % 2\n latt['xyzs'].append([rx, iy, iz,1])\n\n return latt", "def getLatticeConstant(self):\n return self.config['System']['lattice_constant']", "def _get_lattice(self, name):\n group = self._root.lattice._f_get_child(name)\n return H5Lattice(group)", "def initLattice(self,inital_distribution):\n \n self._players = [[player() for j in range(self.gridpoints_y)] for i in range(self.gridpoints_x)]", "def _set_basin_positions(self):\n self._set_basin_positions_square_lattice()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the crystallographic lattice associated with this microstructure.
def get_lattice(self): return self._lattice
[ "def getLattice() :\n lattice = [getElem('loop'),getElem('quad'),getElem('drift'),getElem('quad'),getElem('drift')]\n lattice[3].Kx = -lattice[3].Kx\n return lattice", "def reciprocal_lattice_crystallographic(self) -> \"Lattice\":\n return Lattice(self.reciprocal_lattice.matrix / (2 * np.pi))", "def _get_lattice(self, name):\n group = self._root.lattice._f_get_child(name)\n return H5Lattice(group)", "def getLatticeType(self):\n return self.config['System']['lattice_type']", "def getLatticeConstant(self):\n return self.config['System']['lattice_constant']", "def getLatticeBasis(self):\n return self.config['System']['lattice_basis']", "def lattice(self) -> Lattice:\n vacuum = self.vacuum if self.vacuum is not None else self.translation_vec[2]\n height = (\n self.grain_0.thickness\n + self.grain_1.thickness\n + self.translation_vec[2]\n + vacuum\n )\n lattice = self.grain_0.lattice.matrix.copy()\n lattice[2] *= height / lattice[2, 2]\n if self.average_lattice:\n lattice[0] /= self.grain_0.lattice.a\n lattice[0] *= (self.grain_0.lattice.a + self.grain_1.lattice.a) / 2\n lattice[1] /= self.grain_0.lattice.b\n lattice[1] *= (self.grain_0.lattice.b + self.grain_1.lattice.b) / 2\n return Lattice(lattice)", "def generate_lattice(self, verbose=False):\n if not self._lattice:\n lat = StrictOrders().get_orders(xrange(1, self.set_n + 1), verbose)\n self._lattice = lat", "def get_fantasite_sublattice():\n test_data = _make_fantasite_test_data()\n return test_data.sublattice", "def getLatticeVectors(self):\n return self.config['System']['lattice_vectors']", "def makeLattice(sequence):\n\tlattice=Lattice(sequence.getId())\n\tseq_pos=sequence.getPosition()\n\tseq_len=sequence.getLength()\n\n\t#process all thick (len!=0) elements first\n\tprocessThickElements(sequence,lattice)\n\n\t#fill lattice up to end with drift space\n\tif seq_len > lattice.getLength():\n\t\tlen=seq_len-lattice.getLength()\n\t\tlattice.append(Drift(seq_len-len*0.5,len))\n\n\t#special handling for DPLT elements\n\tif sequence.getId() == \"DTL1\":\n\t\tlast=sequence.getNodeWithId(\"DTL_Diag:DPLT:BPM02\")\n\t\tlen=last.getPosition()-lattice.getLength()\n\t\tlattice.append(Drift(lattice.getLength()+len*0.5,len))\n\n\t#process all thin (len=0) ones\n\tprocessThinElements(sequence,lattice)\n\n\treturn lattice", "def reciprocal_lattice(self) -> \"Lattice\":\n v = np.linalg.inv(self._matrix).T\n return Lattice(v * 2 * np.pi)", "def get_single_atom_sublattice():\n test_data = MakeTestData(50, 50)\n test_data.add_atom(25, 20, 2, 2)\n sublattice = test_data.sublattice\n return sublattice", "def get_simple_cubic_sublattice(image_noise=False):\n test_data = _make_simple_cubic_testdata(image_noise=image_noise)\n return test_data.sublattice", "def get_fantasite_atom_lattice():\n atom_lattice = _get_fantasite_atom_lattice()\n return(atom_lattice)", "def make_lattice(self, latt_type = 'cubic', lat_parms):\n\n if latt_type = 'cubic':\n lx, ly, lz = lat_parms\n latt = {}\n latt['box'] = ['cubic', lx, ly, lz]\n latt['xyzs'] = []\n\n # box dimensions in lattice units\n\n # layer number\n for iz in range(lz):\n # layer structure\n for iy in range(ly):\n for ix in range(lx):\n latt['xyzs'].append([ix, iy, iz,1])\n\n elif latt_type = 'bcc':\n lx, ly, lz = lat_parms\n latt = {}\n latt['box'] = ['bcc', lx, ly, lz]\n latt['xyzs'] = []\n\n # box dimensions in lattice units\n\n # layer number\n for iz in range(lz):\n # layer structure\n for iy in range(ly):\n for ix in range(lx):\n if ix + 0.5 <= (lx - 1) and iy + 0.5 <= (ly - 1) and iz + 0.5 <= (lz - 1):\n 
latt['xyzs'].append([ix + 0.5, iy + 0.5, iz + 0.5, 1])\n latt['xyzs'].append([1 * ix, 1 * iy, 1 * iz, 1])\n\n\n\n elif latt_type = 'fcc':\n lx, ly, lz = lat_parms\n\n latt = {}\n latt['nat'] = lx * ly * lz\n latt['box'] = ['fcc', 2 * lx, ly, lz]\n latt['xyzs'] = []\n\n # box dimensions in lattice units\n\n # layer number\n for iz in range(lz):\n # layer structure\n for iy in range(ly):\n for ix in range(lx):\n rx = 2 * ix + (iy + iz) % 2\n latt['xyzs'].append([rx, iy, iz,1])\n\n return latt", "def crystal_system(self):\n ctype = self.get_attr('crystal_type')\n return CRYSTAL_TYPE_MAP[ctype]", "def lattice(before=bool, exclusive=\"string\", after=bool, dualBase=bool, freezeMapping=bool, commonParent=bool, removeTweaks=bool, includeHiddenSelections=bool, frontOfChain=bool, outsideFalloffDistance=float, prune=bool, objectCentered=bool, geometryIndices=bool, split=bool, divisions=int, geometry=\"string\", name=\"string\", latticeReset=bool, scale=\"string\", remove=bool, parallel=bool, outsideLattice=int, ignoreSelected=bool, rotation=int, afterReference=bool, ldivisions=int, deformerTools=bool, position=\"string\"):\n pass", "def load_lattice(filename):\n lattice = np.load(filename)\n print (\"SOM lattice loaded from %s\" %filename)\n return lattice" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the grain map for this microstructure.
def set_grain_map(self, grain_map, voxel_size): self.grain_map = grain_map self.voxel_size = voxel_size
[ "def set_map(self, map_object):\n for sensor in self.config.sensors:\n sensor.set_map(map_object)", "def type_of_grain(self, type_of_grain):\n\n self._type_of_grain = type_of_grain", "def usermap(self, usermap: ConfigNodePropertyArray):\n\n self._usermap = usermap", "def add_grain(self, ri, l):\r\n self.engine.add_grain(self.engine.ri, ri, l)", "def set_map(self, new_map):\n self.game.mapName = new_map\n parts = new_map.split('_', 1)\n if len(parts) == 2:\n self.game.gameType = parts[0]", "def color_map(self, color_map):\n self._color_map = color_map", "def map_preset(self, pmap):\n global mapboundaries_set, lon_min_map, lon_max_map, lat_min_map, lat_max_map\n global sx0, sy0\n global map_preset, map_manual\n if image_scale == 1:\n p_map = []\n if map_preset == 1:\n # if already preset choosed, delete previous one\n self.member1.delete_point(\"mappreset\")\n for i in range(0, 4):\n p_map.append(CFG[\"presets(x0/y1/x1/y0)\"][pmap][i])\n sx0 = (1911 + ((float(p_map[0]) * 1911) / 180))\n sx1 = (1911 + ((float(p_map[2]) * 1911) / 180))\n if float(p_map[1]) > 0: # point is located in North Hemisphere\n sy0 = (990 - (float(p_map[1]) * 11))\n sy1 = (990 - (float(p_map[3]) * 11))\n else: # point is located in South Hemisphere\n sy0 = (990 + (float(0 - (float(p_map[1]) * 11))))\n sy1 = (990 + (float(0 - float(p_map[3])) * 11))\n self.member1.canvas.create_rectangle(sx0, sy0, sx1, sy1, tag=\"mappreset\", outline='yellow')\n self.member1.delete_point(\"mapmanual\")\n lon_min_map = p_map[0]\n lat_max_map = p_map[1]\n lon_max_map = p_map[2]\n lat_min_map = p_map[3]\n mapboundaries_set = 1\n map_preset = 1\n map_manual = 0\n else:\n self.writelog(\"ERROR : The boundaries selection is forbidden unless map un-zoomed.\")", "def material_map(self, material_map):\n\n material_map = np.array(material_map)\n\n if material_map.shape[0] != self._num_parts:\n raise ValueError(\"Size of material map invalid!\")\n\n self._material_map = material_map", "def recompute_grain_centers(self, verbose=False):\n if not hasattr(self, 'grain_map'):\n print('warning: need a grain map to recompute the center of mass of the grains')\n return\n for g in self.grains:\n try:\n com = self.compute_grain_center(g.id)\n except ValueError:\n print('skipping grain %d' % g.id)\n continue\n if verbose:\n print('grain %d center: %.3f, %.3f, %.3f' % (g.id, com[0], com[1], com[2]))\n g.center = com", "def set_input_map(self, device_name, input_map_name):\n settings = ConfigManager().get_settings(input_map_name)\n if settings:\n self._springy_throttle = settings[\"springythrottle\"]\n self._input_map = ConfigManager().get_config(input_map_name)\n if self._input_device:\n self._input_device.input_map = self._input_map\n Config().get(\"device_config_mapping\")[device_name] = input_map_name", "def set_map(self, place):\r\n\r\n earth_radius = 6378150.0 # [km]\r\n\r\n if place == 'Izu_land':\r\n\r\n # Set lat/long coordinates\r\n # point_origin : map origin\r\n # point_center : circle limt area\r\n # point_range : limit area vertex\r\n self.lim_radius = 50.0 # define circle limit area\r\n\r\n self.point_origin = np.array([34.735972, 139.420944])\r\n\r\n self.point_center = np.array([[34.735972, 139.420944],\r\n [34.735390, 139.421377],\r\n [34.731230, 139.423150]])\r\n\r\n self.point_range = np.array([[34.735715, 139.420922],\r\n [34.731750, 139.421719],\r\n [34.733287, 139.424590],\r\n [34.736955, 139.426038],\r\n [34.738908, 139.423597],\r\n [34.740638, 139.420681],\r\n [34.741672, 139.417387],\r\n [34.735715, 139.420922],\r\n ])\r\n\r\n 
self.point_center_rel = self.point_center - self.point_origin\r\n self.point_range_rel = self.point_range - self.point_origin\r\n\r\n # Set magnetic declination\r\n self.mag_dec_deg = -7.53 # [deg]\r\n\r\n mag_dec_rad = np.deg2rad(self.mag_dec_deg)\r\n mat_rot = np.array([[np.cos(mag_dec_rad), -1 * np.sin(mag_dec_rad)],\r\n [np.sin(mag_dec_rad), np.cos(mag_dec_rad)]])\r\n\r\n # Convert lat/lon to meter\r\n self.lat2met = 2 * math.pi * earth_radius / 360.0\r\n self.lon2met = 2 * math.pi * earth_radius * np.cos(np.deg2rad(self.point_origin[0])) / 360.0\r\n \r\n # Convert from lat/long to meter (ENU coordinate)\r\n self.xy_center = np.zeros(self.point_center.shape)\r\n self.xy_range = np.zeros(self.point_range.shape)\r\n\r\n self.xy_center[:,0] = self.lon2met * self.point_center_rel[:,1]\r\n self.xy_center[:,1] = self.lat2met * self.point_center_rel[:,0]\r\n self.xy_range[:,0] = self.lon2met * self.point_range_rel[:,1]\r\n self.xy_range[:,1] = self.lat2met * self.point_range_rel[:,0]\r\n\r\n # Apply magnetic effect\r\n for i in range(self.point_center.shape[0]):\r\n self.xy_center[i,:] = mat_rot @ self.xy_center[i,:]\r\n\r\n for i in range(self.point_range.shape[0]):\r\n self.xy_range[i,:] = mat_rot @ self.xy_range[i,:]\r\n\r\n # Setup MAP image --------------------------\r\n # Convert pixel to meter\r\n pixel2meter = 0.946981208125\r\n\r\n # Set map image\r\n img_map = Image.open(\"./map/Izu_map_mag.png\")\r\n img_list = np.asarray(img_map)\r\n img_height = img_map.size[0]\r\n img_width = img_map.size[1]\r\n img_origin = np.array([722, 749]) # TODO : compute by lat/long of launcher point\r\n\r\n # Define image range \r\n img_left = -1.0 * img_origin[0] * pixel2meter\r\n img_right = (img_width - img_origin[0]) * pixel2meter\r\n img_top = img_origin[1] * pixel2meter\r\n img_bottom = -1.0 * (img_height - img_origin[1]) * pixel2meter\r\n\r\n plt.figure(figsize=(10,8))\r\n plt.imshow(img_list, extent=(img_left, img_right, img_bottom, img_top))\r\n\r\n # Define color\r\n color_line = '#ffff33' # Yellow\r\n color_circle = 'r' # Red\r\n\r\n # Set circle object\r\n ax = plt.axes()\r\n\r\n # plot limit area\r\n for i in range(self.point_center.shape[0]):\r\n circle = patches.Circle(xy=self.xy_center[i,:], radius=self.lim_radius,\r\n ec=color_circle, fill=False)\r\n ax.add_patch(circle)\r\n plt.plot(self.xy_center[i,0], self.xy_center[i,1], '.', color=color_circle)\r\n\r\n plt.plot(self.xy_range[:,0], self.xy_range[:,1], '--', color=color_line)", "def load_new_map(self):\n self.map = gamemap.GameMap(self.datamap)", "def set_table(self):\n \n # Expand user path\n mukey_path = os.path.expanduser(mukey_path)\n gdb_path = os.path.expanduser(gdb_path)\n dst = os.path.expanduser(dst)\n \n # Get the Map Unit Aggregated Attribute Table\n mukey = xr.open_rasterio(mukey_path, chunks=(1, 5000, 5000))\n muaggatt = gpd.read_file(gdb_path, layer=\"muaggatt\")\n chorizon = gpd.read_file(gdb_path, layer=\"chorizon\")\n components = gpd.read_file(gdb_path, layer=\"component\")\n components = pd.merge(chorizon, components, on=\"cokey\")\n components = pd.merge(components, muaggatt, on=\"mukey\")\n \n # Put the keys in front\n keys = [c for c in components.columns if \"key\" in c]\n others = [c for c in components.columns if \"key\" not in c]\n new_order = keys + others\n components = components[new_order]\n \n # Get the Horizon Table\n variable_df = components[[\"mukey\", \"chkey\", \"hzname\", variable]]\n units = muaggatt[[\"mukey\", \"muname\"]]\n variable_df = pd.merge(variable_df, units, on=\"mukey\")\n variable_df 
= variable_df.dropna()\n \n # Now, whats the best way to map these values\n val_dict = dict(zip(variable_df[\"mukey\"].astype(int),\n variable_df[variable]))\n mv = Map_Values(val_dict, err_val=-9999)\n mv.map_file(mukey_path, dst)", "def setBumpMapOverride(state: 'SoState', value: 'SbBool const') -> \"void\":\n return _coin.SoTextureOverrideElement_setBumpMapOverride(state, value)", "def _set_map_(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=map_.map_, is_container='container', presence=False, yang_name=\"map\", rest_name=\"map\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure QoS or sflow Map', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"map_ must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=map_.map_, is_container='container', presence=False, yang_name=\"map\", rest_name=\"map\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure QoS or sflow Map', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__map_ = t\n if hasattr(self, '_set'):\n self._set()", "def set_map_files(\n self,\n sample_file,\n fine_file=None,\n coarse_file=None,\n historical_fine_file=None,\n historical_coarse_file=None,\n dispersal_map=None,\n death_map=None,\n reproduction_map=None,\n ):\n if dispersal_map is None:\n self.dispersal_map.file_name = \"none\"\n else:\n self.dispersal_map.file_name = dispersal_map\n if death_map is None:\n self.death_map.file_name = \"none\"\n else:\n self.death_map.file_name = death_map\n if reproduction_map is None:\n self.reproduction_map.file_name = \"none\"\n else:\n self.reproduction_map.file_name = reproduction_map\n Landscape.set_map_files(\n self,\n sample_file,\n fine_file,\n coarse_file,\n historical_fine_file,\n historical_coarse_file,\n )", "def setCostMap(self, _costMap) -> None:\n ...", "def display_basemap():\n world = gp.read_file(gp.datasets.get_path('naturalearth_lowres'))\n world.plot()", "def add_mapping(self, periph, mapping):\n self._mappings[periph] = mapping" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A simple utility method to show one microstructure slice.
def view_slice(self, slice=None, color='random', show_mask=True):
    if not hasattr(self, 'grain_map'):
        print('Microstructure instance must have a grain_map field to use this method')
        return
    if slice is None or slice > self.grain_map.shape[2] - 1 or slice < 0:
        slice = self.grain_map.shape[2] // 2
        print('using slice value %d' % slice)
    if color == 'random':
        grain_cmap = Microstructure.rand_cmap(first_is_black=True)
    elif color == 'ipf':
        grain_cmap = self.ipf_cmap()
    else:
        grain_cmap = 'viridis'
    fig, ax = plt.subplots()
    ax.imshow(self.grain_map[:, :, slice].T, cmap=grain_cmap, vmin=0)
    ax.xaxis.set_label_position('top')
    plt.xlabel('X')
    plt.ylabel('Y')
    if hasattr(self, 'mask') and show_mask:
        from pymicro.view.vol_utils import alpha_cmap
        plt.imshow(self.mask[:, :, slice].T, cmap=alpha_cmap(opacity=0.3))
    plt.show()
[ "def test_slice_basic(self):\n\n utils.compare_tracing_methods(\n SimpleSliceModel(), torch.rand((2, 3)), skip_to_glow=True\n )", "def printSubDiagram(self, *args):\n return _coin.SoBaseKit_printSubDiagram(self, *args)", "def test_slice(setup):\n assert isinstance(setup[\"sliced\"], da.Array)", "def display_slice(ct_slice_sitk, pet_slice_sitk,\n true_gtv_slice_sitk=None, pred_gtv_slice_sitk=None,\n title=None, dpi=100):\n\n n_inputs = 2\n\n ct_slice_sitk = sitk.Cast(sitk.IntensityWindowing(ct_slice_sitk, windowMinimum=-200, windowMaximum=600,\n outputMinimum=0.0, outputMaximum=255.0), sitk.sitkUInt8)\n pet_slice_sitk = sitk.Cast(sitk.IntensityWindowing(pet_slice_sitk, windowMinimum=0, windowMaximum=10,\n outputMinimum=0.0, outputMaximum=255.0), sitk.sitkUInt8)\n\n\n ct_slice_np = sitk.GetArrayFromImage(ct_slice_sitk)\n pet_slice_np = sitk.GetArrayFromImage(pet_slice_sitk)\n if true_gtv_slice_sitk:\n true_gtv_slice_sitk = true_gtv_slice_sitk * 255\n true_gtv_slice_np = sitk.GetArrayFromImage(true_gtv_slice_sitk)\n n_inputs += 1\n if pred_gtv_slice_sitk:\n pred_gtv_slice_sitk = pred_gtv_slice_sitk * 255\n pred_gtv_slice_np = sitk.GetArrayFromImage(pred_gtv_slice_sitk)\n n_inputs += 1\n\n\n spacing = ct_slice_sitk.GetSpacing() # Get pixel spacing of the slice\n extent = (0, ct_slice_np.shape[0]*spacing[1], ct_slice_np.shape[1]*spacing[0], 0)\n figsize = (n_inputs*5,5)\n #figsize = (n_inputs*ct_slice_np.shape[1]/dpi, n_images*ct_slice_np.shape[0]/dpi)\n\n fig, axs = plt.subplots(nrows=1, ncols=n_inputs, figsize=figsize)\n\n axs[0].imshow(ct_slice_np, extent=extent, interpolation=None, cmap='gray')\n axs[0].set_title(\"CT\")\n axs[1].imshow(pet_slice_np, extent=extent, interpolation=None, cmap='gray')\n axs[1].set_title(\"PET\")\n\n if true_gtv_slice_sitk:\n axs[2].imshow(true_gtv_slice_np, extent=extent, interpolation=None, cmap='gray')\n axs[2].set_title(\"Actual GTV\")\n if pred_gtv_slice_sitk:\n axs[3].imshow(pred_gtv_slice_np, extent=extent, interpolation=None, cmap='gray')\n axs[3].set_title(\"Predicted GTV\")\n\n for ax in axs: ax.axis('off')\n if title: fig.suptitle(title, fontsize='x-large')\n plt.show()", "def _repr_parts(self):\n return []", "def _show(container, start=-1, stop=-1):\n if start == stop:\n cont = [container[start]]\n else:\n cont = container[start:stop]\n\n try:\n for i, img in enumerate(cont):\n DebugDisplay.show_resized(str(i), img)\n except IndexError:\n print(\"No such value\")", "def __getslice__(*args):\n return _Field.vectormats___getslice__(*args)", "def get_slice_from_volume(image, view, slice_id):\n if(view == 1):\n image = np.transpose(image, [2, 0, 1])\n elif(view == 2):\n image = np.transpose(image, [1, 0, 2])\n return image[slice_id]", "def test_slice(self):\r\n img = Image(np.random.randint(0, 255, size=(100, 500, 3), dtype=np.uint8))\r\n\r\n sliced = img[...]\r\n assert np.allclose(sliced, img)\r\n assert sliced.name == img.name\r\n assert not _is_ref_unequal(sliced, img)", "def test_swift_array_slice_formatters(self):\n self.build()\n lldbutil.run_to_source_breakpoint(\n self, \"break here\", lldb.SBFileSpec(\"main.swift\")\n )\n\n for var in (\"someSlice\", \"arraySlice\", \"arraySubSequence\"):\n self.expect(f\"v {var}\", substrs=[f\"{var} = 2 values\", \"[1] = 2\", \"[2] = 3\"])", "def multi_slice_viewer(volume, first_index=0, cmap=None):\n remove_keymap_conflicts({'j', 'k'})\n fig, ax = plt.subplots()\n ax.volume = volume\n ax.index = first_index#volume.shape[0] // 2\n ax.imshow(volume[ax.index], cmap=cmap)\n ax.set_title('slice 
{}'.format(ax.index))\n fig.canvas.mpl_connect('key_press_event', process_key)", "def show(self, reveal=0):\n \n # XXX use __repr__? str?\n if self.cards == []:\n print(\"[no cards]\")\n return\n else:\n if reveal:\n print(\" \", self.cards[0].name())\n else:\n print(\" one face down card\")\n\n for card in self.cards[1:]:\n print(\" \", card.name())\n\n self.showvalue(reveal)", "def single_slice_helper(self, offset, size):\n if offset < 0:\n offset = 0\n return slice(offset, offset + size)", "def plot_slice(image: sitk.Image):\n img_arr = sitk.GetArrayFromImage(image)\n plt.figure()\n plt.imshow(img_arr[80, :, :], cmap='gray')\n plt.colorbar()\n plt.show()", "def __getslice__(self, *args, **kwargs):\n return _digital_swig.gr_complex_vector___getslice__(self, *args, **kwargs)", "def slice(rec, start=0, end=0):\n\n # Nothing needs to be done\n if not (start or end):\n return rec\n\n # There is end but no start\n if end and not start:\n start = 1\n\n # There start but no end\n if start and not end:\n end = len(rec.seq)\n\n rec.seq = rec.seq[start - 1:end]\n rec.description = f\"{start}:{end} {rec.description}\"\n return rec", "def slides():\n return (\n *subfigure(),\n *mosaic(),\n bar_label(),\n )", "def renderPartition():\n pass", "def get_command(self) -> str:\n return 'sub3section'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
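A minimal usage sketch for the view_slice snippet above, assuming a pymicro-style `Microstructure` class in scope; the dummy grain map below is illustrative and simply stands in for a real labelled volume.

import numpy as np

micro = Microstructure(name='demo')
# dummy labelled volume; a real grain_map would come from a reconstruction
micro.grain_map = np.random.randint(0, 20, size=(64, 64, 64))
micro.view_slice()                           # middle slice, random grain colors
micro.view_slice(slice=10, show_mask=False)  # a specific slice, no mask overlay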
Generate a random texture microstructure.
def random_texture(n=100):
    m = Microstructure(name='random_texture')
    for i in range(n):
        m.grains.append(Grain(i + 1, Orientation.random()))
    return m
[ "def sample_image(self):\n z = torch.randn(1, self.latent_size)\n r_t = self.decoder(z)\n return r_t", "def RandomArt(random = StrongRandom(\"\"), size = 128):\n img = randomart.Create(random, size)\n return img", "def world_texture(hdr_name):\r\n world=bpy.data.worlds['World']\r\n world.use_nodes = True\r\n links = world.node_tree.links\r\n nodes = world.node_tree.nodes\r\n for l in links:\r\n links.remove(l)\r\n for n in nodes:\r\n nodes.remove(n)\r\n world_output = nodes.new(type='ShaderNodeOutputWorld')\r\n background_node = nodes.new(type='ShaderNodeBackground')\r\n if hdr_name[-3:] == 'exr':\r\n background_node.inputs[1].default_value = 100\r\n env_node = nodes.new(type='ShaderNodeTexEnvironment')\r\n env_node.image = bpy.data.images.load(hdr_name)\r\n mapping_node = nodes.new(type='ShaderNodeMapping')\r\n mapping_node.inputs[2].default_value[1] = random.uniform(0, 3.14)\r\n cor_node = nodes.new(type='ShaderNodeTexCoord')\r\n links.new(cor_node.outputs['Generated'],mapping_node.inputs['Vector'])\r\n links.new(mapping_node.outputs['Vector'],env_node.inputs['Vector'])\r\n links.new(env_node.outputs['Color'],background_node.inputs['Color'])\r\n links.new(background_node.outputs['Background'],world_output.inputs['Surface'])\r\n return", "def __random_generation(size: int) -> TUnit:\n return ''.join(map(str, np.random.randint(0, 2, size, int)))", "def give_random_structure(surface=SCREEN):\n structure = STRUCTURES[randrange(len(STRUCTURES))].copy()\n for square in structure.squares:\n if square.full:\n return False\n structure.draw_structure(surface, display_on_screen=True)\n return structure", "def randomizeHairTexture(self):\n img = Image.open(self.texture_path)\n # Take original color out of .png by conversion to BW and back\n img = img.convert(\"LA\")\n img = img.convert(\"RGB\")\n # 0.3 mu random normal mix factor (0 is no color input, 1 is solid color)\n mix_factor = np.random.normal(0.6, 0.1)\n img = (\n RGBTransform()\n .mix_with(Hair.hair_colors[self.hair_color], factor=mix_factor)\n .applied_to(img)\n )\n # save - overwrite current .png texture file\n img.save(self.texture_path)\n return", "def _generate_random_unitaries(self):\n\n n = number_of_qubits(self._qp)\n for i in range(self._samples):\n unitaries_list = []\n for j in range(n):\n u = unitary_group.rvs(2)\n alpha, theta, phi, lam = decompose_yzy(u)\n unitaries_list.append([theta, phi, lam])\n self._unitaries.update({'sample_{}'.format(i): unitaries_list})", "def rand_temp():\n return BASE_T + random() * RAND_MULT", "def texture_compound(self, texture):\n return dict(tex_sampler=texture)", "def random():\n return Scale(Note.random(), Mode.random())", "def meme_rand():\n\n img = random.choice(imgs)\n quote = random.choice(quotes)\n print(f'This are the {quote} in the file')\n path = meme.make_meme(system_path, img, quote.body, quote.author)\n return render_template('meme.html', path=path)", "def choose_template(self):\n # --- imf ---\n imfs = ['chab', 'salp']\n self.imf = imfs[np.random.randint(2)]\n\n # --- metallicity ---\n if self.z <= 1:\n metals = ['m22', 'm32', 'm42', 'm52', 'm62']\n elif (self.z >1 ) & (self.z <= 2):\n metals = ['m22', 'm32', 'm42', 'm52']\n elif (self.z > 2) & (self.z <= 3):\n metals = ['m22', 'm32', 'm42']\n else:\n metals = ['m22', 'm32']\n self.metallicity = metals[np.random.randint(len(metals))]\n\n # --- sfh ---\n sfhs = ['tau0p01', 'tau0p50', 'tau5p00']\n self.sfh = sfhs[np.random.random_integers(0,2)]\n\n return '_'.join([self.imf, self.metallicity, self.sfh])", "def random_unit(cls):\n\n 
#\n # This method is adapted from \n # http://mathworld.wolfram.com/SpherePointPicking.html\n #\n theta = random() * pi * 2.0\n return vector2d(cos(theta), sin(theta))", "def load(self):\n pos_x = self._load_face(self.meta.pos_x, face_name=\"pos_x\")\n pos_y = self._load_face(self.meta.pos_y, face_name=\"pos_y\")\n pos_z = self._load_face(self.meta.pos_z, face_name=\"pos_z\")\n neg_x = self._load_face(self.meta.neg_x, face_name=\"neg_x\")\n neg_y = self._load_face(self.meta.neg_y, face_name=\"neg_y\")\n neg_z = self._load_face(self.meta.neg_z, face_name=\"neg_z\")\n\n self._validate([pos_x, pos_y, pos_z, neg_x, neg_y, neg_z])\n\n texture = self.ctx.texture_cube(\n (pos_x.width, pos_x.height),\n pos_x.components,\n pos_x.data + neg_x.data + pos_y.data + neg_y.data + pos_z.data + neg_z.data,\n )\n texture.extra = {\"meta\": self.meta}\n\n if self.meta.mipmap_levels is not None:\n self.meta.mipmap = True\n\n if self.meta.mipmap:\n if isinstance(self.meta.mipmap_levels, tuple):\n texture.build_mipmaps(*self.meta.mipmap_levels)\n else:\n texture.build_mipmaps()\n\n if self.meta.anisotropy:\n texture.anisotropy = self.meta.anisotropy\n\n return texture", "def meme_rand():\n img = None\n quote = None\n\n img = random.choice(imgs)\n quote = random.choice(quotes)\n\n path = meme.make_meme(img, quote.body, quote.author)\n return render_template('meme.html', path=path)", "def texture(self, tname,\n tcoords=None,\n interpolate=True,\n repeat=True,\n edgeClamp=False,\n ):\n pd = self.polydata(False)\n if tname is None:\n pd.GetPointData().SetTCoords(None)\n pd.GetPointData().Modified()\n return self\n\n if isinstance(tname, vtk.vtkTexture):\n tu = tname\n else:\n if tcoords is not None:\n if not isinstance(tcoords, np.ndarray):\n tcoords = np.array(tcoords)\n if tcoords.ndim != 2:\n colors.printc('tcoords must be a 2-dimensional array', c=1)\n return self\n if tcoords.shape[0] != pd.GetNumberOfPoints():\n colors.printc('Error in texture(): nr of texture coords must match nr of points', c=1)\n return self\n if tcoords.shape[1] != 2:\n colors.printc('Error in texture(): vector must have 2 components', c=1)\n tarr = numpy_to_vtk(np.ascontiguousarray(tcoords), deep=True)\n tarr.SetName('TCoordinates')\n pd.GetPointData().SetTCoords(tarr)\n pd.GetPointData().Modified()\n else:\n if not pd.GetPointData().GetTCoords():\n tmapper = vtk.vtkTextureMapToPlane()\n tmapper.AutomaticPlaneGenerationOn()\n tmapper.SetInputData(pd)\n tmapper.Update()\n tc = tmapper.GetOutput().GetPointData().GetTCoords()\n pd.GetPointData().SetTCoords(tc)\n pd.GetPointData().Modified()\n\n fn = settings.textures_path + tname + \".jpg\"\n if os.path.exists(tname):\n fn = tname\n elif not os.path.exists(fn):\n colors.printc(\"File does not exist or texture\", tname,\n \"not found in\", settings.textures_path, c=\"r\")\n colors.printc(\"~pin Available built-in textures:\", c=\"m\", end=\" \")\n for ff in os.listdir(settings.textures_path):\n colors.printc(ff.split(\".\")[0], end=\" \", c=\"m\")\n print()\n return self\n\n fnl = fn.lower()\n if \".jpg\" in fnl or \".jpeg\" in fnl:\n reader = vtk.vtkJPEGReader()\n elif \".png\" in fnl:\n reader = vtk.vtkPNGReader()\n elif \".bmp\" in fnl:\n reader = vtk.vtkBMPReader()\n else:\n colors.printc(\"Error in texture(): supported files, PNG, BMP or JPG\", c=\"r\")\n return self\n reader.SetFileName(fn)\n reader.Update()\n\n tu = vtk.vtkTexture()\n tu.SetInputData(reader.GetOutput())\n tu.SetInterpolate(interpolate)\n tu.SetRepeat(repeat)\n tu.SetEdgeClamp(edgeClamp)\n\n 
self.GetProperty().SetColor(1, 1, 1)\n self._mapper.ScalarVisibilityOff()\n self.SetTexture(tu)\n self.Modified()\n return self", "def init(self, state: 'SoState') -> \"void\":\n return _coin.SoTextureUnitElement_init(self, state)", "def get_random_tileid():\r\n return randrange(0, len(ALL_TILES))", "def texture(self, page=0):\n return self.pages[page]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
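A quick illustration of the generator above; the grain lookup relies on the `get_grain` and `Grain`/`Orientation` APIs shown elsewhere in this file, and the printed values are of course random.

micro = Microstructure.random_texture(n=50)   # 50 grains with random orientations
print(len(micro.grains))                      # -> 50
g = micro.get_grain(1)                        # ids start at 1 in this snippet
print(g.orientation)                          # the random Orientation of grain 1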
Creates a random color map. The first color can be forced to black, which usually corresponds to the background. The random seed is fixed so that the same colormap is produced consistently.
def rand_cmap(N=4096, first_is_black=False):
    np.random.seed(13)
    rand_colors = np.random.rand(N, 3)
    if first_is_black:
        rand_colors[0] = [0., 0., 0.]  # enforce black background (value 0)
    return colors.ListedColormap(rand_colors)
[ "def random_color_gen():\n r = lambda: random.randint(0, 255)\n return 'ff%02X%02X%02X' % (r(), r(), r())", "def random_color():\n return systemrandom.randint(0x000000, 0xFFFFFF)", "def get_colormap(num_agents):\n colors = cm.get_cmap('jet', num_agents)\n colors = colors(range(num_agents))\n np.random.shuffle(colors)\n return colors", "def random_color():\n r = lambda: random.randint(0,255)\n return('#%02X%02X%02X' % (r(),r(),r()))", "def randomColor():\n return Color((_random.randint(0,255),_random.randint(0,255),_random.randint(0,255)))", "def random_colors():\n def r():\n return random.randint(0, 255)\n return 'rgb({},{},{})'.format(r(), r(), r())", "def random_color():\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n\n return r, g, b", "def randcolor():\n r = random(0.0, 1.0)\n g = random(0.0, 1.0)\n b = random(0.0, 1.0)\n return vector(r, g, b) # A color is a three-element vector", "def make_random_color(self):\n \n color = \"#\"\n for number in range(6):\n color += \"%01x\" % random.randint(6, 0xD)\n return color", "def setRandomColor(self):\n self.color=mycolors.random()", "def randColor():\n h = 0.3\n v = 0.85\n s = 0.9\n \n for c in startColors:\n yield c\n\n while True:\n \n def toHex(x):\n return hex(int(x*255))[2:]\n \n r, g, b = hsv_to_rgb(h, s, v)\n \n yield u'#' + toHex(r) + toHex(g) + toHex(b)\n\n h += 2 / (1+math.sqrt(5))\n h = h - 1 if h > 1 else h\n #v = 1-v", "def rand_color() -> list:\n\n # IMPORT DONE HERE TO SAVE TIME AT MODULE INIT\n import random\n\n return [random.randrange(256), random.randrange(256), random.randrange(256)]", "def getRandomColor():\n color = \"#\"\n for number in range(6):\n color += toHexChar(randint(0, 15))\n return color", "def initialize_map():\n map = np.zeros(shape = World.SHAPE, dtype = np.int16)\n i = 0\n while i < World.NUM_OF_FOOD:\n x = random.randint(0,World.SHAPE[0] - 1)\n y = random.randint(0,World.SHAPE[1] - 1)\n if map[x, y] == 0:\n map[x, y] = 1\n i += 1\n return map", "def randcolor(value=1):\n return rgb_float_to_int(colorsys.hsv_to_rgb(random.random(), 1, value))", "def get_random_color():\n R = random.randint(200, 250)\n G = random.randint(200, 250)\n B = random.randint(200, 250)\n random_rgb = (R, G, B)\n return random_rgb", "def get_rand_color(self):\n color_min = 200\n self.color = list(numpy.random.randint(0, 255, 3))\n i = 0\n while sum(self.color) < color_min:\n self.color = list(numpy.random.randint(10, 255, 3))\n if i == 10:\n break\n i += 1\n return self.color", "def change_colors():\n global t,u,v,w,x,y,z\n t = randint(0,27)\n u = randint(0,27)\n v = randint(0,27)\n w = randint(0,27)\n x = randint(0,27)\n y = randint(0,27)\n z = randint(0,27)\n return t,u,v,w,x,y,z,", "def _background_color(s):\n seed(s)\n r = v = b = 255\n while r + v + b > 255*2:\n r = randint(0, 255)\n v = randint(0, 255)\n b = randint(0, 255)\n return (r, v, b)", "def randomcolor(eps=.1):\n r = round(random()/eps)*eps\n g = round(random()/eps)*eps\n b = round(random()/eps)*eps\n return (r,g,b)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
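A short sketch showing how the random colormap above could be used to display a labelled image with matplotlib; the label array is dummy data and the module aliases (np, plt) match the imports implied by the snippet.

import numpy as np
import matplotlib.pyplot as plt

labels = np.random.randint(0, 100, size=(128, 128))           # dummy grain id image
cmap = Microstructure.rand_cmap(N=4096, first_is_black=True)
plt.imshow(labels, cmap=cmap, vmin=0)   # id 0 (background) is rendered black
plt.colorbar()
plt.show()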
Return a colormap with ipf colors.
def ipf_cmap(self):
    N = len(self.grains)
    ipf_colors = np.zeros((4096, 3))
    for g in self.grains:
        ipf_colors[g.id, :] = g.orientation.get_ipf_colour()
    return colors.ListedColormap(ipf_colors)
[ "def CO2p_colormap(bad=None, n=256):\n\n # color sequence from black -> purple -> white\n cmap_colors = [(0, 0, 0), (0.7255, 0.0588, 0.7255), (1, 1, 1)]\n\n # set colormap name\n cmap_name = 'CO2p'\n\n # make a colormap using the color sequence and chosen name\n cmap = colors.LinearSegmentedColormap.from_list(cmap_name, cmap_colors, N=n)\n\n # set the nan color\n if bad is not None:\n try:\n cmap.set_bad(bad)\n except:\n raise Exception('Invalid choice for bad data color. Try a color tuple, e.g., (0,0,0).')\n\n # return the colormap\n return cmap", "def get_inferno_rev_cmap():\n inferno_colormap = colormap_utils.ensure_colormap('inferno')\n inferno_rev_colormap = {\n 'colors': np.copy(inferno_colormap.colors)[::-1],\n 'name': 'inferno_inv',\n 'interpolation': 'linear'\n }\n inferno_rev_colormap['colors'][-1, -1] = 0\n return inferno_rev_colormap", "def naivecolormap(value):\r\n # value2pixel(0.5) -> (0.5,0.5,0.5)\r\n red = (value & 0x00ff0000) >> 16\r\n green = (value & 0x0000ff00) >> 8\r\n blue = (value & 0x000000ff) >> 0\r\n \r\n return (int(red), int(green), int(blue)) # rgb\r", "def CO_colormap(bad=None, n=256):\n\n # color sequence from black -> red -> white\n cmap_colors = [(0, 0, 0), (0.722, 0.051, 0), (1, 1, 1)]\n\n # set colormap name\n cmap_name = 'CO'\n\n # make a colormap using the color sequence and chosen name\n cmap = colors.LinearSegmentedColormap.from_list(cmap_name, cmap_colors, N=n)\n\n # set the nan color\n if bad is not None:\n try:\n cmap.set_bad(bad)\n except:\n raise Exception('Invalid choice for bad data color. Try a color tuple, e.g., (0,0,0).')\n\n # return the colormap\n return cmap", "def custom_cmap(n):\n # first color is grey from Set1, rest other sensible categorical colourmap\n cmap_array = sns.color_palette(\"Set1\", 9)[-1:] + sns.husl_palette(n - 1, h=.6, s=0.7)\n cmap = colors.LinearSegmentedColormap.from_list('mmdgm_cmap', cmap_array)\n return cmap, cmap_array", "def create_trash_label_colormap():\n colormap = np.zeros((11, 3), dtype=np.uint8)\n for inex, (_, r, g, b) in enumerate(CLASS_COLORMAP):\n colormap[inex] = [r, g, b]\n \n return colormap", "def label_colormap(n_label=256):\n def bitget(byteval, idx):\n return ((byteval & (1 << idx)) != 0)\n\n cmap = np.zeros((n_label, 3))\n for i in six.moves.range(0, n_label):\n id = i\n r, g, b = 0, 0, 0\n for j in six.moves.range(0, 8):\n r = np.bitwise_or(r, (bitget(id, 0) << 7 - j))\n g = np.bitwise_or(g, (bitget(id, 1) << 7 - j))\n b = np.bitwise_or(b, (bitget(id, 2) << 7 - j))\n id = (id >> 3)\n cmap[i, 0] = r\n cmap[i, 1] = g\n cmap[i, 2] = b\n cmap = cmap.astype(np.float32) / 255\n return cmap", "def _build_colormap(c_map, f_map):\n\n return {k: v for k, v in c_map + f_map if v is not None}", "def gencolors(n, cmap='jet'):\n c = matplotlib.cm.get_cmap('Set1')\n cols = c(np.linspace(0, 1, n))\n clist = [matplotlib.colors.rgb2hex(rgb) for rgb in cols]\n return clist", "def make_colormap(mycolors,ninterp=100): \n\n thecolors = np.array([])\n for i in range(len(mycolors)-1):\n rs = np.linspace(mycolors[i][0],mycolors[i+1][0],ninterp,endpoint=1)\n gs = np.linspace(mycolors[i][1],mycolors[i+1][1],ninterp,endpoint=1)\n bs = np.linspace(mycolors[i][2],mycolors[i+1][2],ninterp,endpoint=1)\n \n thecolors = np.append(thecolors,np.array([rs,gs,bs,[1]*len(rs)]).T)\n thecolors = thecolors.reshape(-1,4)\n indices = 1.0*np.arange(len(thecolors))/len(thecolors)\n\n\n def my_cmap(i):\n try:\n len(i)\n argmin = np.argmin((indices[:,None]-i)**2,axis=0)\n except:\n argmin = np.argmin((indices-i)**2)\n \n return 
thecolors[argmin]\n\n return my_cmap", "def getColors(n,cpick='jet'):\n\t\n\t# Create colormap\n\tc = cmap.get_cmap(cpick)\n\t\n\t# Create list of colors\n\tcolors=[]\n\tfor i in range(n):\n\t\tcolors.append(c(float(i)/n))\n\t\n\treturn colors", "def getcmaprgb(N, cmap):\n\treturn cmap(np.linspace(0,255,N).astype(int))", "def create_minc_segmentation_label_colormap():\n colormap = create_pascal_label_colormap()[:23]\n colormap[5] = [255, 246, 0] # food\n colormap[16] = [237, 166, 118] # skin\n colormap[14] = [247, 69, 69] # plastic\n colormap[10] = [255, 255, 170] # mirror\n #colormap = np.asarray([\n # [91, 10, 6], # brick\n # [171, 173, 29], # carpet\n # [203, 204, 201], # ceramic\n # [183, 25, 128], # fabric\n # [4, 89, 10], # foliage\n # [107, 255, 218], # food\n # [74, 145, 127], # glass\n # [76, 50, 10], # hair\n # [209, 125, 0], # leather\n # [97, 0, 209], # metal\n # [0, 209, 135], # mirror\n # [255, 255, 255], # other\n # [0, 255, 46], # painted\n # [219, 249, 137], # paper\n # [247, 69, 69], # plastic\n # [0, 60, 100], # polishedstone\n # [237, 166, 118], # skin\n # [0, 0, 230], # sky\n # [119, 11, 32], # stone\n # [255, 246, 0], # tile\n # [0, 187, 255], # wallpaper\n # [255, 0, 0], # water\n # [255, 63, 0], # wood\n #])\n return colormap", "def discrete_cmap(N, base_cmap=None):\n return plt.cm.get_cmap(base_cmap, N)", "def _build_cmap(cp_file, gid_file):\n keys = _parse_array_from_str(cp_file.read(), 'I', 'big')\n gids = _parse_array_from_str(gid_file.read(), 'H', 'big')\n cmap = dict.fromkeys(keys)\n\n for i, key in enumerate(keys):\n cmap[key] = gids[i]\n return cmap", "def cmap(self, cmap):\r\n colormap =cm.get_cmap(cmap) \r\n if get_vizEngine().lower().strip() == 'bokeh': \r\n paletteName = [plt.colors.rgb2hex(m) for m in colormap(np.arange(colormap.N))]\r\n cmap = paletteName\r\n elif get_vizEngine().lower().strip() == 'plotly':\r\n pl_entries = 255\r\n h = 1.0/(pl_entries-1)\r\n pl_colorscale = []\r\n for k in range(pl_entries):\r\n C = list(map(np.uint8, np.array(colormap(k*h)[:3])*255))\r\n pl_colorscale.append([k*h, 'rgb'+str((C[0], C[1], C[2]))])\r\n cmap = pl_colorscale\r\n self.__cmap = cmap", "def get_color(x, cmap=plt.cm.plasma):\n mag = torch.max(x) - torch.min(x)\n x_norm = (x.float() - torch.min(x))/mag\n return cmap(x_norm)", "def discrete_cmap(N, base_cmap='Paired'):\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def color_palette(self):\n color = [[0.00, 0.00, 1.00],\\\n [0.24, 0.16, 0.75],\\\n [0.36, 0.24, 0.63],\\\n [0.53, 0.34, 0.47],\\\n [0.77, 0.50, 0.22],\\\n [1.00, 0.63, 0.00],\\\n [1.00, 0.50, 0.00],\\\n [1.00, 0.37, 0.00],\\\n [1.00, 0.24, 0.00],\\\n [1.00, 0.10, 0.00],\\\n [1.00, 0.00, 0.00]]\n return color", "def _cmap_from_data(x):\n if np.alltrue(x >= 0):\n cmap = plt.cm.Reds\n elif np.alltrue(x <= 0):\n cmap = plt.cm.Blues\n else:\n cmap = plt.cm.Blues_r\n return cmap" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
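The IPF colormap above can be combined with the random_texture helper; a minimal sketch, assuming both methods belong to the same `Microstructure` class:

micro = Microstructure.random_texture(n=20)
cmap = micro.ipf_cmap()        # ListedColormap with one IPF color per grain id
print(cmap.colors[1])          # RGB triplet attached to grain 1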
Create a `Microstructure` by reading grain information from a file.
def from_grain_file(grain_file_path, col_id=0, col_phi1=1, col_phi=2, col_phi2=3, col_x=4, col_y=5, col_z=None, col_volume=None):
    # get the file name without extension
    name = os.path.splitext(os.path.basename(grain_file_path))[0]
    print('creating microstructure %s' % name)
    micro = Microstructure(name=name)
    # read grain infos from the grain file
    grains_EBSD = np.genfromtxt(grain_file_path)
    for i in range(len(grains_EBSD)):
        o = Orientation.from_euler([grains_EBSD[i, col_phi1], grains_EBSD[i, col_phi], grains_EBSD[i, col_phi2]])
        g = Grain(int(grains_EBSD[i, col_id]), o)
        z = grains_EBSD[i, col_z] if col_z else 0.
        g.position = np.array([grains_EBSD[i, col_x], grains_EBSD[i, col_y], z])
        if col_volume:
            g.volume = grains_EBSD[i, col_volume]
        micro.grains.append(g)
    return micro
[ "def from_h5(file_path):\n with h5py.File(file_path, 'r') as f:\n micro = Microstructure(name=f.attrs['microstructure_name'])\n if 'symmetry' in f['EnsembleData/CrystalStructure'].attrs:\n sym = f['EnsembleData/CrystalStructure'].attrs['symmetry']\n parameters = f['EnsembleData/CrystalStructure/LatticeParameters'][()]\n micro.set_lattice(Lattice.from_symmetry(Symmetry.from_string(sym), parameters))\n if 'data_dir' in f.attrs:\n micro.data_dir = f.attrs['data_dir']\n # load feature data\n if 'R_vectors' in f['FeatureData']:\n print('some grains')\n avg_rods = f['FeatureData/R_vectors'][()]\n print(avg_rods.shape)\n if 'grain_ids' in f['FeatureData']:\n grain_ids = f['FeatureData/grain_ids'][()]\n else:\n grain_ids = range(1, 1 + avg_rods.shape[0])\n if 'centers' in f['FeatureData']:\n centers = f['FeatureData/centers'][()]\n else:\n centers = np.zeros_like(avg_rods)\n for i in range(avg_rods.shape[0]):\n g = Grain(grain_ids[i], Orientation.from_rodrigues(avg_rods[i, :]))\n g.center = centers[i]\n micro.grains.append(g)\n # load cell data\n if 'grain_ids' in f['CellData']:\n micro.grain_map = f['CellData/grain_ids'][()]\n if 'voxel_size' in f['CellData/grain_ids'].attrs:\n micro.voxel_size = f['CellData/grain_ids'].attrs['voxel_size']\n if 'mask' in f['CellData']:\n micro.mask = f['CellData/mask'][()]\n if 'voxel_size' in f['CellData/mask'].attrs:\n micro.voxel_size = f['CellData/mask'].attrs['voxel_size']\n return micro", "def from_neper(neper_file_path):\n neper_file = neper_file_path.split(os.sep)[-1]\n print('creating microstructure from Neper tesselation %s' % neper_file)\n name, ext = os.path.splitext(neper_file)\n print(name, ext)\n assert ext == '.tesr' # assuming raster tesselation\n micro = Microstructure(name=name)\n with open(neper_file_path, 'r', encoding='latin-1') as f:\n line = f.readline() # ***tesr\n # look for **general\n while True:\n line = f.readline().strip() # get rid of unnecessary spaces\n if line.startswith('**general'):\n break\n dim = f.readline().strip()\n print(dim)\n dims = np.array(f.readline().split()).astype(int).tolist()\n print(dims)\n voxel_size = np.array(f.readline().split()).astype(float).tolist()\n print(voxel_size)\n # look for **cell\n while True:\n line = f.readline().strip()\n if line.startswith('**cell'):\n break\n n = int(f.readline().strip())\n print('microstructure contains %d grains' % n)\n f.readline() # *id\n grain_ids = []\n # look for *ori\n while True:\n line = f.readline().strip()\n if line.startswith('*ori'):\n break\n else:\n grain_ids.extend(np.array(line.split()).astype(int).tolist())\n print('grain ids are:', grain_ids)\n oridescriptor = f.readline().strip() # must be euler-bunge:passive\n if oridescriptor != 'euler-bunge:passive':\n print('Wrong orientation descriptor: %s, must be euler-bunge:passive' % oridescriptor)\n for i in range(n):\n euler_angles = np.array(f.readline().split()).astype(float).tolist()\n print('adding grain %d' % grain_ids[i])\n micro.grains.append(Grain(grain_ids[i], Orientation.from_euler(euler_angles)))\n # look for **data\n while True:\n line = f.readline().strip()\n if line.startswith('**data'):\n break\n print(f.tell())\n print('reading data from byte %d' % f.tell())\n data = np.fromfile(f, dtype=np.uint16)[:-4] # leave out the last 4 values\n print(data.shape)\n assert np.prod(dims) == data.shape[0]\n micro.set_grain_map(data.reshape(dims[::-1]).transpose(2, 1, 0), voxel_size[0]) # swap X/Z axes\n micro.recompute_grain_centers()\n print('done')\n return micro", "def 
createStructureFromBVH(file_name):\n\n\t\tassert file_name[-4:] == \".bvh\"\n\n\t\tRoot = Node(None)\n\n\t\tinfo_frame = []\n\n\t\twith open(file_name) as motion_info:\n\t\t\tf = motion_info.read().strip()\n\t\t\tf = f.split(\"\\n\")\n\t\t\tline = Node.readline(f)\n\n\t\t\twhile (Node.readline(f)[0] != \"MOTION\"):\n\t\t\t\tcontinue\n\n\t\t\tprint(\"ROOT NODE CREATED\")\n\t\t\tnumber_of_frames = int(Node.readline(f)[1])\n\t\t\tRoot.frame_time = float(Node.readline(f)[2])\n\n\t\t\tinfo_frame = [[float(j) for j in i.split(\" \")] for i in f]\n\n\n\t\twith open(file_name) as f: # Use file to refer to the file object\n\t\t\tf = f.read().strip()\n\t\t\tf = f.split(\"\\n\")\n\t\t\tline = Node.readline(f)\n\n\t\t\twhile (line[0] != \"ROOT\"):\n\t\t\t\tline = Node.readline(f)\n\t\t\t\tif line is None: # On n'a pas trouvé la root dans le fichier\n\t\t\t\t\treturn Root\n\n\t\t\tRoot.name = line[1]\n\t\t\toffset, position, rotation = Node.readInfo(f, info_frame)\n\t\t\tRoot.translate = offset; Root.position = position; Root.rotate = rotation\n\n\t\t\tNode.CreateChild(Root, f, info_frame);\n\n\t\t\t# We need to assert that info_frame is fully empty\n\t\t\tfor i in info_frame:\n\t\t\t\tassert len(i) == 0\n\n\t\t\treturn Root", "def __init__(self, file: str | PathLike):\n ms1, ms2, metadata = LoadMGF(name_field='scans').load_spectra([str(file)])\n logger.info('%d molecules parsed from MGF file', len(ms1))\n self._spectra = _mols_to_spectra(ms2, metadata)", "def construct_simulation_from_toml(filename: str) -> Simulation:\n with open(filename) as f:\n input_data = toml.load(f)\n\n return Simulation(input_data)", "def from_file(self, register_filepath):\n\n with open(register_filepath,'r') as reg_file:\n for line in reg_file:\n self.store_register(json.loads(line.strip()))", "def instanceFromFile(file):\n graphMode = True\n\n graph = dict()\n obligationsSet = []\n with open(file) as f :\n for line in f:\n\n if line == \"\\n\":\n graphMode = False\n\n\n elif graphMode:\n if line[-1] == '\\n':\n line = line[:-1]\n\n info = line.split(\":\")\n graph[info[0]] = set(info[1:])\n\n else:\n if line[-1] == '\\n':\n line = line[:-1]\n obligation = set(line.split(\",\"))\n obligationsSet.append(obligation)\n\n\n if obligationsSet == []:\n printW(\"Warning : No obligation set found. 
It will be automatically provide\")\n obligationsSet, n = obligationsGenerator(graph, minObligations = 1, maxObligations = len(graph.keys()))\n else :\n n = len(obligationsSet)\n\n meta = {\"type\" : \"From file\", \"vertices\" : len(graph.keys()), \"obligations\" : len(obligationsSet)}\n return (graph, obligationsSet, meta)", "def structure_init() -> StructureData:\n import os\n import pymatgen\n\n #filepath = os.path.realpath(os.path.join(os.path.dirname(__file__), '../../../common/data/Si.cif'))\n filepath = os.path.realpath(os.path.join(os.path.dirname(__file__), '../../../common/data/Al.cif'))\n structure = pymatgen.Structure.from_file(filepath, primitive=False)\n\n return StructureData(pymatgen_structure=structure)", "def add_grain_file(self, filename):\r\n f = open(filename, 'r')\r\n fin = f.read()\r\n grains = fin.split(\"grain,\")\r\n for i in grains:\r\n grain = i.split(\",\")\r\n if grain[0] != '':\r\n self.add_grain(float(grain[0]), float(grain[1]))\r\n f.close()", "def from_file(file,):\n\n raise NotImplementedError(\n \"Loading Spectrograms from images is not implemented yet\"\n )", "def generate(self):\n # we open file\n with open(self.file, \"r\") as file:\n structure_level = []\n # we rune line of file\n for line in file:\n line_level = []\n # we run sprites (letters) contain in file\n for sprite in line:\n # we ignore the \"\\n\" of end line\n if sprite != '\\n':\n # we add the sprite at line of level\n line_level.append(sprite)\n # we add the line at structure level\n structure_level.append(line_level)\n # we save structure\n self.structure = structure_level", "def load(fn: str) -> 'GripperInfo':\n res = GripperInfo(fn,-1)\n with open(fn,'r') as f:\n jsonobj = json.load(f)\n res.fromJson(jsonobj)\n GripperInfo.register(res)\n return res", "def readAsLM(self, path):\n try:\n f = open(path,'r',encoding='utf-8')\n except UnicodeDecodeError:\n f = open(path,'r',encoding='utf-16')\n self.enc = f.encoding\n self.append(Tier()) #Only 1 tier here\n for line in f:\n if line.startswith(\"#\"): #metadata\n match = re.compile(r\"#WAVEFORM NAME: (.+)\").search(line)\n if match:\n self.waveformName = match.groups()[0]\n continue\n match = re.compile(r\"#WAVEFORM CHECKSUM: (.+)\").search(line)\n if match:\n self.waveformChecksum = match.groups()[0]\n continue\n else:\n match = re.compile(r\"(.+) (.+)\").search(line)\n if match:\n self[0].append(Point(match.groups()[0], match.groups()[1]))\n if len(self[0].items)>0:\n self.xmax = self[0].items[-1].time\n else:\n self.xmax = \"0\"", "def create_data_structures(file_path):\n\n # Creating the base of the two data structures\n board = {}\n entities = {}\n\n # Reading the file\n fh = open(file_path, 'r')\n board_info = fh.readlines()\n fh.close()\n\n # Deleting all the \\n\n element_id = 0\n for line in board_info:\n board_info[element_id] = line[0:-1]\n element_id += 1\n\n # Creating all the coordinates in the board\n nb_lines, nb_columns = board_info[1].split()\n nb_lines = int(nb_lines)\n nb_columns = int(nb_columns)\n\n for y in range (1, nb_lines + 1):\n for x in range(1, nb_columns + 1):\n board[(y, x)] = []\n\n # Creating the hubs in entities dict\n hub_blue = board_info[3].split()\n hub_red = board_info[4].split()\n\n entities['hub_blue'] = {'coordinates': (int(hub_blue[0]),int(hub_blue[1])), 'type': 'hub', 'team': 'blue', 'structure_points': int(hub_blue[2]),\n 'storage_capacity' : int(hub_blue[3]), 'available_energy': int(hub_blue[3]), 'regeneration_rate': int(hub_blue[4])}\n entities['hub_red'] = {'coordinates': 
(int(hub_red[0]),int(hub_red[1])), 'type': 'hub', 'team': 'red', 'structure_points': int(hub_red[2]),\n 'storage_capacity' : int(hub_red[3]), 'available_energy': int(hub_red[3]), 'regeneration_rate': int(hub_red[4])}\n\n # Creating the peaks in entities dict\n peak_id = 1\n for line in board_info[6:]:\n peak_info = line.split()\n entities['peak_%s' % str(peak_id)] = {'coordinates' : (int(peak_info[0]), int(peak_info[1])), 'type' : 'peak', 'available_energy' : int(peak_info[2])}\n peak_id += 1\n\n # actualising the board_dict with the information of entities dict\n board = actualise_board(board, entities)\n\n return board, entities, nb_columns, nb_lines", "def from_dct(data_dir='.', grain_file='index.mat', vol_file='phase_01_vol.mat', mask_file='volume_mask.mat',\n use_dct_path=True, verbose=True):\n if data_dir == '.':\n data_dir = os.getcwd()\n if data_dir.endswith(os.sep):\n data_dir = data_dir[:-1]\n scan = data_dir.split(os.sep)[-1]\n print('creating microstructure for DCT scan %s' % scan)\n micro = Microstructure(name=scan)\n micro.data_dir = data_dir\n if use_dct_path:\n index_path = os.path.join(data_dir, '4_grains', 'phase_01', grain_file)\n else:\n index_path = os.path.join(data_dir, grain_file)\n print(index_path)\n if not os.path.exists(index_path):\n raise ValueError('%s not found, please specify a valid path to the grain file.' % index_path)\n return None\n from scipy.io import loadmat\n index = loadmat(index_path)\n micro.voxel_size = index['cryst'][0][0][25][0][0]\n # grab the crystal lattice\n lattice_params = index['cryst'][0][0][3][0]\n sym = Symmetry.from_string(index['cryst'][0][0][7][0])\n print('creating crystal lattice {} ({}) with parameters {}'.format(index['cryst'][0][0][0][0], sym, lattice_params))\n lattice_params[:3] /= 10 # angstrom to nm\n lattice = Lattice.from_parameters(*lattice_params, symmetry=sym)\n micro.set_lattice(lattice)\n # add all grains to the microstructure\n for i in range(len(index['grain'][0])):\n gid = index['grain'][0][i][0][0][0][0][0]\n rod = index['grain'][0][i][0][0][3][0]\n g = Grain(gid, Orientation.from_rodrigues(rod))\n g.center = index['grain'][0][i][0][0][15][0]\n micro.grains.append(g)\n\n # load the grain map if available\n if use_dct_path:\n grain_map_path = os.path.join(data_dir, '5_reconstruction', vol_file)\n else:\n grain_map_path = os.path.join(data_dir, vol_file)\n if os.path.exists(grain_map_path):\n with h5py.File(grain_map_path, 'r') as f:\n # because how matlab writes the data, we need to swap X and Z axes in the DCT volume\n micro.grain_map = f['vol'][()].transpose(2, 1, 0)\n if verbose:\n print('loaded grain ids volume with shape: {}'.format(micro.grain_map.shape))\n # load the mask if available\n if use_dct_path:\n mask_path = os.path.join(data_dir, '5_reconstruction', mask_file)\n else:\n mask_path = os.path.join(data_dir, mask_file)\n if os.path.exists(mask_path):\n try:\n with h5py.File(mask_path, 'r') as f:\n micro.mask = f['vol'][()].transpose(2, 1, 0).astype(np.uint8)\n except:\n # fallback on matlab format\n micro.mask = loadmat(mask_path)['vol']\n if verbose:\n print('loaded mask volume with shape: {}'.format(micro.mask.shape))\n return micro", "def load_file(self, filepath):\n with open(filepath, encoding = self.encoding) as descriptor:\n content = descriptor.read()\n meta, body = self.split(content)\n\n return MetaFile(filepath, meta, body, self.meta_render, self.body_render)", "def create_from_gaf(self, file, **args):\n return self.create_from_file(file, fmt='gaf', **args)", "def generate(self):\n # 
File opening\n with open(self.file) as file:\n level_structure = []\n # We map the file line by line\n for line in file:\n level_line = []\n # We map all the line's sprites (letters)\n for sprite in line:\n # We ignore the end of line sprites\n if sprite != '\\n':\n # If not end of line we add the sprite to the line\n level_line.append(sprite)\n # We then add the line to the level list level_structure\n level_structure.append(level_line)\n # We save the level_structure\n self.structure = level_structure", "def read(f):\n\tp = HMMParser()\n\treturn p.read(f)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
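A hedged example of calling the reader above; the file name is hypothetical and the column indices simply mirror the keyword defaults of the snippet (id, phi1, Phi, phi2, x, y, plus optional z and volume columns).

# grains.txt: whitespace-separated columns  id phi1 Phi phi2 x y z volume
micro = Microstructure.from_grain_file('grains.txt', col_z=6, col_volume=7)
print('%d grains loaded' % len(micro.grains))
print(micro.grains[0].position)   # 3-component position of the first grain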
Load a Microstructure object from an XML file. The grains that are loaded can be restricted by providing a list of the ids of the grains of interest.
def from_xml(xml_file_name, grain_ids=None, verbose=False):
    if verbose and grain_ids:
        print('loading only grain ids %s' % grain_ids)
    micro = Microstructure()
    dom = parse(xml_file_name)
    root = dom.childNodes[0]
    name = root.childNodes[0]
    micro.name = name.childNodes[0].nodeValue
    grains = root.childNodes[1]
    for node in grains.childNodes:
        if grain_ids and not (int(node.childNodes[0].childNodes[0].nodeValue) in grain_ids):
            continue
        if verbose:
            print(node)
        micro.grains.append(Grain.from_xml(node, verbose))
    return micro
[ "def load(class_, path, api, profile):\n if path is None:\n path = os.path.join(os.path.dirname(__file__), 'gl.xml')\n obj = class_(api, profile)\n obj.dependencies.append(path)\n tree = etree.parse(path)\n for e in tree.getroot():\n try:\n func = getattr(obj, '_read_' + e.tag)\n except AttributeError:\n raise ValueError('Unknown tag: {}'.format(e.tag))\n func(e)\n return obj", "def load(name):\n g = read_graphml(\"graphs//\" + name + \".graphml\", node_type=int)\n return g", "def from_h5(file_path):\n with h5py.File(file_path, 'r') as f:\n micro = Microstructure(name=f.attrs['microstructure_name'])\n if 'symmetry' in f['EnsembleData/CrystalStructure'].attrs:\n sym = f['EnsembleData/CrystalStructure'].attrs['symmetry']\n parameters = f['EnsembleData/CrystalStructure/LatticeParameters'][()]\n micro.set_lattice(Lattice.from_symmetry(Symmetry.from_string(sym), parameters))\n if 'data_dir' in f.attrs:\n micro.data_dir = f.attrs['data_dir']\n # load feature data\n if 'R_vectors' in f['FeatureData']:\n print('some grains')\n avg_rods = f['FeatureData/R_vectors'][()]\n print(avg_rods.shape)\n if 'grain_ids' in f['FeatureData']:\n grain_ids = f['FeatureData/grain_ids'][()]\n else:\n grain_ids = range(1, 1 + avg_rods.shape[0])\n if 'centers' in f['FeatureData']:\n centers = f['FeatureData/centers'][()]\n else:\n centers = np.zeros_like(avg_rods)\n for i in range(avg_rods.shape[0]):\n g = Grain(grain_ids[i], Orientation.from_rodrigues(avg_rods[i, :]))\n g.center = centers[i]\n micro.grains.append(g)\n # load cell data\n if 'grain_ids' in f['CellData']:\n micro.grain_map = f['CellData/grain_ids'][()]\n if 'voxel_size' in f['CellData/grain_ids'].attrs:\n micro.voxel_size = f['CellData/grain_ids'].attrs['voxel_size']\n if 'mask' in f['CellData']:\n micro.mask = f['CellData/mask'][()]\n if 'voxel_size' in f['CellData/mask'].attrs:\n micro.voxel_size = f['CellData/mask'].attrs['voxel_size']\n return micro", "def from_neper(neper_file_path):\n neper_file = neper_file_path.split(os.sep)[-1]\n print('creating microstructure from Neper tesselation %s' % neper_file)\n name, ext = os.path.splitext(neper_file)\n print(name, ext)\n assert ext == '.tesr' # assuming raster tesselation\n micro = Microstructure(name=name)\n with open(neper_file_path, 'r', encoding='latin-1') as f:\n line = f.readline() # ***tesr\n # look for **general\n while True:\n line = f.readline().strip() # get rid of unnecessary spaces\n if line.startswith('**general'):\n break\n dim = f.readline().strip()\n print(dim)\n dims = np.array(f.readline().split()).astype(int).tolist()\n print(dims)\n voxel_size = np.array(f.readline().split()).astype(float).tolist()\n print(voxel_size)\n # look for **cell\n while True:\n line = f.readline().strip()\n if line.startswith('**cell'):\n break\n n = int(f.readline().strip())\n print('microstructure contains %d grains' % n)\n f.readline() # *id\n grain_ids = []\n # look for *ori\n while True:\n line = f.readline().strip()\n if line.startswith('*ori'):\n break\n else:\n grain_ids.extend(np.array(line.split()).astype(int).tolist())\n print('grain ids are:', grain_ids)\n oridescriptor = f.readline().strip() # must be euler-bunge:passive\n if oridescriptor != 'euler-bunge:passive':\n print('Wrong orientation descriptor: %s, must be euler-bunge:passive' % oridescriptor)\n for i in range(n):\n euler_angles = np.array(f.readline().split()).astype(float).tolist()\n print('adding grain %d' % grain_ids[i])\n micro.grains.append(Grain(grain_ids[i], Orientation.from_euler(euler_angles)))\n # look for **data\n while 
True:\n line = f.readline().strip()\n if line.startswith('**data'):\n break\n print(f.tell())\n print('reading data from byte %d' % f.tell())\n data = np.fromfile(f, dtype=np.uint16)[:-4] # leave out the last 4 values\n print(data.shape)\n assert np.prod(dims) == data.shape[0]\n micro.set_grain_map(data.reshape(dims[::-1]).transpose(2, 1, 0), voxel_size[0]) # swap X/Z axes\n micro.recompute_grain_centers()\n print('done')\n return micro", "def from_grain_file(grain_file_path, col_id=0, col_phi1=1, col_phi=2, col_phi2=3, col_x=4, col_y=5, col_z=None, col_volume=None):\n # get the file name without extension\n name = os.path.splitext(os.path.basename(grain_file_path))[0]\n print('creating microstructure %s' % name)\n micro = Microstructure(name=name)\n\n # read grain infos from the grain file\n grains_EBSD = np.genfromtxt(grain_file_path)\n for i in range(len(grains_EBSD)):\n o = Orientation.from_euler([grains_EBSD[i, col_phi1], grains_EBSD[i, col_phi], grains_EBSD[i, col_phi2]])\n g = Grain(int(grains_EBSD[i, col_id]), o)\n z = grains_EBSD[i, col_z] if col_z else 0.\n g.position = np.array([grains_EBSD[i, col_x], grains_EBSD[i, col_y], z])\n if col_volume:\n g.volume = grains_EBSD[i, col_volume]\n micro.grains.append(g)\n return micro", "def loadXml(self, xml):\n if xml is not None:\n for xprop in xml:\n self.loadXmlProperty(xprop)", "def load_xml(self):\n try:\n self.root = XMLReader(self.path).root\n\n #for sign in self.root.findall('./signs/sign'):\n # self.load_sign_xml(sign)\n\n for block in self.root.findall('./blocks/block'):\n self.load_block_xml(block)\n\n # load replacments etc...\n except Exception, e:\n log.exception('error loading buildfile')", "def load(self, fname):\n parser = etree.XMLParser(strip_cdata=False)\n doc = etree.parse(fname, parser=parser)\n beans = doc.xpath(\"//b:bean\", namespaces=NAMESPACES)\n for bean in beans:\n b = self.beanLoaderFactory(bean)\n # self.beans.append( b )", "def load(self, filename):\n readingRelationships = False\n for line in open(filename,'r').readlines():\n line = line.strip()\n if (line == \"\"):\n readingRelationships = True\n continue\n if (readingRelationships):\n self._readRelationship(line)\n else: self._readNode(line)\n self.nodes.sort(key = lambda x: x.id)", "def loadFromXmlPart(self,xmlPart):\r\n\t\t\"\"\" we need to display the egg of the station \"\"\"\r\n\t\tself.id=int(xmlPart.getElementsByTagName('idstation')[0].firstChild.data)\r\n\t\tself.name=str(xmlPart.getElementsByTagName('name')[0].firstChild.data)\r\n\t\tposx=float(xmlPart.getElementsByTagName('posx')[0].firstChild.data)\r\n\t\tposy=float(xmlPart.getElementsByTagName('posy')[0].firstChild.data)\r\n\t\tposz=float(xmlPart.getElementsByTagName('posz')[0].firstChild.data)\r\n\t\thprh=float(xmlPart.getElementsByTagName('hprh')[0].firstChild.data)\r\n\t\thprp=float(xmlPart.getElementsByTagName('hprp')[0].firstChild.data)\r\n\t\thprr=float(xmlPart.getElementsByTagName('hprr')[0].firstChild.data)\r\n\t\tself.mass=float(xmlPart.getElementsByTagName('mass')[0].firstChild.data)\r\n\t\tself.egg=str(xmlPart.getElementsByTagName('egg')[0].firstChild.data)\r\n\t\tself.scale=float(xmlPart.getElementsByTagName('scale')[0].firstChild.data)\r\n\t\tself.exitZone=int(xmlPart.getElementsByTagName('exitzone')[0].firstChild.data)\r\n\t\tself.pos=(posx,posy,posz)\r\n\t\tself.hpr=(hprh,hprp,hprr)\r\n\t\tif self.inSpace==True:\r\n\t\t\tself.node=loader.loadModel(shimConfig.getInstance().getConvRessourceDirectory() +self.egg)\r\n\t\t\tself.node.reparentTo(render)\r\n\t\t\tself.node.setName( 
self.name)\r\n\t\t\tself.node.setPos(self.pos)\r\n\t\t\tself.node.setHpr(self.hpr)\r\n\t\t\tself.node.setTag(\"name\",\"station\")\r\n\t\t\tself.node.setTag(\"classname\",\"station\")\r\n\t\t\tself.node.setTag(\"id\",str(self.id))\r\n\t\t\tself.node.setShaderAuto()", "def load_graph(self, file_path):\n file_name, file_extension = os.path.splitext(file_path)\n self.graphname = os.path.basename(file_name)\n self.graph = minidom.parse(file_path)\n self.validation_output_graph = self.graph.cloneNode(True)\n # Populate all node related lists\n has_errors = self.userdata.populate_userdata(self.graph, self.validation_output_graph)\n self.graph_has_errors |= has_errors\n\n has_errors = self.image_nodes.populate_image_nodes_lists(self.graph,\n self.userdata,\n self.validation_output_graph)\n self.graph_has_errors |= has_errors\n\n self.function_nodes.populate_function_nodes_indexed_lists(self.graph, self.library, self.image_nodes)", "def _load_model_object(self):\n # load models for objects\n path = xml_path_completion(furniture_xmls[self._furniture_id])\n logger.debug(\"load furniture %s\" % path)\n resize_factor = None\n if self._manual_resize is not None:\n resize_factor = 1 + self._manual_resize\n elif self._config.furn_size_rand != 0:\n rand = self._init_random(1, \"resize\")[0]\n resize_factor = 1 + rand\n self._objects = MujocoXMLObject(path, debug=self._debug, resize=resize_factor)\n self._objects.hide_visualization()\n part_names = self._objects.get_children_names()\n\n # furniture pieces\n lst = []\n for part_name in part_names:\n lst.append((part_name, self._objects))\n\n self.mujoco_objects = OrderedDict(lst)\n self.n_objects = len(self.mujoco_objects)\n self.mujoco_equality = self._objects.equality", "def load(filename):\n return XMLReader().fromFile(open(filename))", "def from_graphML(self, in_file):\n pass", "def load_set(self, path):\n \n basepath = os.path.split(path)[0]\n \n file = open(path, \"r\")\n for line in file:\n tokens = line.split()\n if len(tokens) > 0:#skip blank lines\n if len( tokens) != 3:\n raise Exception( \"Invalid asset line {\" + line + \"}\")\n\n type = tokens[0] \n tag = tokens[1]\n path = os.path.join( basepath, tokens[2])\n self._assets[ tag] = _Asset( path, type)", "def load_osm(\n filepath, specific_dtypes={\"risk\": float, \"global_risk\": float, \"length\": float}\n):\n # NOTE :\n # Use specific_dtypes to convert risk to float to avoid error as\n # TypeError: unsupported operand type(s) for +: 'int' and 'str'\n # during the route computing on the risk attributes\n\n G = ox.load_graphml(\n filepath=filepath, node_dtypes=specific_dtypes, edge_dtypes=specific_dtypes\n )\n return G", "def load(self):\n\n # Create Inventory object instance\n self.general = Inventory('item_data.txt')\n\n # Create children inventory objects instances\n foodinv = FoodInventory('item_data.txt')\n techinv = TechInventory('item_data.txt')\n medinv = MedicineInventory('item_data.txt')\n clothinv = ClothingInventory('item_data.txt')\n musicinv = MusicInventory('item_data.txt')\n\n # Define list of categorized inventories\n self.cate_list = [foodinv.create_foodList(),techinv.create_techList(),medinv.create_medList(),clothinv.create_clothingList(),musicinv.create_musicList()]", "def from_xml(self, filename):\n # #print(os.getcwd())\n root = ET.parse(filename).getroot()\n for node in root:\n if node.tag == \"persoon\":\n p = Person(node.attrib[\"naam\"], id_=node.attrib[\"id\"])\n for i in node:\n if i.tag == \"head\":\n self.head = p\n elif i.tag == \"geb\":\n p.birth = i.text\n elif 
i.tag == \"stf\":\n p.dead = i.text\n elif node.tag == \"familie\":\n parents = []\n children = []\n div = False\n for i in node:\n if i.tag in (\"ouder\", \"kind\"):\n try:\n p = Person.all_[i.text]\n except KeyError:\n p = Person(\"ERROR\", id_=i.text)\n if i.tag == \"ouder\":\n parents.append(p)\n else:\n children.append(p)\n elif i.tag == \"divorsed\":\n div = True\n self.families.append(Family(parents, children, div))", "def load(self, filename):\n\n # reinitialize the object\n self.__init__()\n # fill in the object\n o = open(filename)\n s = o.read()\n a = ArffFile.parse(s)\n self.relation = a.relation\n self.attributes = a.attributes\n self.attribute_types = a.attribute_types\n self.attribute_data = a.attribute_data\n self.comment = a.comment\n self.data = a.data\n o.close()", "def LoadLevel(levelElement):\n name = levelElement.attrib['name']\n id = int(levelElement.attrib['id'])\n rows, columns = LoadGridSettings(levelElement.find('grid'))\n defenses = LoadDefenses(levelElement.find('defenses'))\n init = LevelInit(name, id, rows, columns, defenses)\n return Level(init)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
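A sketch of the XML loader above in use; the file name is a placeholder and the grain ids are arbitrary.

micro = Microstructure.from_xml('my_microstructure.xml')              # load every grain
subset = Microstructure.from_xml('my_microstructure.xml',
                                 grain_ids=[1, 5, 42], verbose=True)  # only selected grains
print(len(subset.grains))   # at most 3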
Get a particular grain given its id. This method browses the microstructure and returns the grain corresponding to the given id. If the grain is not found, the method raises a `ValueError`.
def get_grain(self, gid):
    for grain in self.grains:
        if grain.id == gid:
            return grain
    raise ValueError('grain %d not found in the microstructure' % gid)
[ "def getByID(id_genus:int):\n genus = GenusAPI().get_by_id(id_genus)\n schema = GenusSchema()\n results = schema.load(genus, many=False)\n return results[0]", "def get_fuse_region(self, _id):\n for fuse_region in self.fuse_region:\n if fuse_region.id == _id:\n return fuse_region\n else:\n raise RuntimeError('Fuse Region not found for id: ' + _id)", "def find(self, id):\n try:\n return self.magnets[id]\n except ValueError:\n pass # not found\n except KeyError:\n pass\n return self.find_by_name(id)", "def from_grain_file(grain_file_path, col_id=0, col_phi1=1, col_phi=2, col_phi2=3, col_x=4, col_y=5, col_z=None, col_volume=None):\n # get the file name without extension\n name = os.path.splitext(os.path.basename(grain_file_path))[0]\n print('creating microstructure %s' % name)\n micro = Microstructure(name=name)\n\n # read grain infos from the grain file\n grains_EBSD = np.genfromtxt(grain_file_path)\n for i in range(len(grains_EBSD)):\n o = Orientation.from_euler([grains_EBSD[i, col_phi1], grains_EBSD[i, col_phi], grains_EBSD[i, col_phi2]])\n g = Grain(int(grains_EBSD[i, col_id]), o)\n z = grains_EBSD[i, col_z] if col_z else 0.\n g.position = np.array([grains_EBSD[i, col_x], grains_EBSD[i, col_y], z])\n if col_volume:\n g.volume = grains_EBSD[i, col_volume]\n micro.grains.append(g)\n return micro", "def __readGrain(self, offset):\n sectorOffset = StreamVmdkMedia.__byteOffsetToSectorOffset(offset) #translate the offset in bytes to an offset in sectors\n grainOffset = StreamVmdkMedia.__sectorOffsetToGrainOffset(sectorOffset)\n \n if grainOffset == len(self.__fullGT):\n return self.__incompleteWrittenGrain + StreamVmdkMedia.__padToGrain(self.__incompleteWrittenGrain)\n fileLocation = self.__fullGT[ grainOffset ] * SECTOR_SIZE#get the location in the file where we can find the grain\n \n if fileLocation:\n self.__file.seek( fileLocation + UINT64_BYTE_SIZE)#set the file position to point to the data-length byte of the marker\n compressedLength = struct.unpack(\"=I\", self.__file.read(UINT32_BYTE_SIZE))[0]#extract the required number of bytes\n compressedData = self.__file.read( compressedLength )#read the compressed data\n uncompressedData = zlib.decompress(compressedData)\n if len(uncompressedData) != GRAIN_SIZE:\n logging.critical(\"len(Uncompressed grain) != GRAIN_SIZE\")\n raise VMDKStreamException(\"invalid/corrupted input file! 
(incorrect grain size)\")\n return uncompressedData#and since we still need to read at least a whole grain we can add all uncompressed data\n else:#null block: add one whole grain of nulls\n return StreamVmdkMedia.__zeroGrain", "def get(self, id):\n region = Region.query.filter_by(id=id).first()\n if region is None:\n return { 'message': 'Region does not exist'}, 404\n\n return region_schema.dump(region)", "def get_sensor(self,sensor_id):\n if sensor_id in self.sensors:\n return self.sensors[sensor_id]\n else: raise ValueError(\"No Sensor exists for id: {0}\".format(sensor_id))", "def globularswarms_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=GLOBULARSWARM_TYPE_URI,\n rdf_type_name=GLOBULARSWARM_TYPE_NAME, \n kls=Globularswarm)", "def load_texture_by_id( self, id, texture=None ):\n if id in (\n 'textures/common/hintskip',\n 'textures/common/clip',\n 'textures/common/botclip',\n 'textures/common/nodraw',\n 'textures/common/skip',\n 'textures/common/donotenter',\n 'textures/common/invisible',\n 'textures/common/trigger',\n ):\n return self.brush_class( [ ('surfaceparam','nodraw')] )\n if texture is None:\n texture = self.textures[id]\n relative = (b''.join( texture['filename'] )).decode('utf-8')\n \n img = None\n img = self.load_script( id, relative )\n if img is None:\n img = self._load_image_file( relative )\n if not img:\n log.warn( \"Unable to find Image #%s: %s\", id, relative )\n return img", "def get_node(self, id):\n for node in self.nodes:\n if node.id == id:\n return node\n else: raise ValueError()", "def read_by_id(_id):\n try:\n return Group.get(Group.id == _id)\n except Exception:\n return None", "def genelocations_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=GENELOCATION_TYPE_URI,\n rdf_type_name=GENELOCATION_TYPE_NAME, \n kls=GeneLocation)", "def get_sensor(id):\n query = {\"_id\": ObjectId(id)}\n if (sensor_db.count_documents(query)) < 1 :\n return make_response(\"sensor with id: \" + id + \" doesnt exist\", 404)\n else:\n x = sensor_db.find_one(query)\n x[\"_id\"] = str(x[\"_id\"])\n return make_response(json.dumps(x), 200)", "def load_grain(grains, k):\n grain = -np.ones(dims)\n ind = grains[k][0]-1\n [x, y, z] = np.unravel_index(ind, dims, order='F')\n val = grains[k][1]\n grain[y,x,z] = val\n verts, faces = measure.marching_cubes_classic(grain, 0, spacing=(1, 1, 1))\n return verts, faces", "def getById (id):\r\n if id in thingsById:\r\n return thingsById[id]\r\n else:\r\n return None", "def find_in_shapefile(self, shp, gardenid):\n try:\n return filter(lambda f: f['properties']['GARDENID'] == int(gardenid), shp)[0]\n except IndexError:\n return None", "def lookupRegion(self, _id):\n regionname=\"\"\n regionresult=self._regionsTable.loc[self._regionsTable[self.ID]==_id]\n\n if(len(regionresult)==1):\n regionname=regionresult[self.REGIONNAME].iloc[0]\n\n return(regionname)", "def handballleagues_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=HANDBALLLEAGUE_TYPE_URI,\n rdf_type_name=HANDBALLLEAGUE_TYPE_NAME, \n kls=HandballLeague)", "def get_object(id, session=None):\n if session is None:\n session = get_default_session()\n\n if isinstance(id, str) or isinstance(id, str):\n sim, ts, halo = id.split(\"/\")\n ts = get_timestep(sim + \"/\" + ts, session)\n return ts[halo]\n else:\n return session.query(SimulationObjectBase).filter_by(id=int(id)).first()", "def manhwas_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n 
rdf_type_uri=MANHWA_TYPE_URI,\n rdf_type_name=MANHWA_TYPE_NAME, \n kls=Manhwa)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all the grain positions as a numpy array of shape (n, 3), where n is the number of grains.
def get_grain_positions(self):
    positions = np.empty((self.get_number_of_grains(), 3))
    for i in range(self.get_number_of_grains()):
        positions[i] = self.grains[i].position
    return positions
[ "def positions(self):\n return get_positions(as_numpy=True).reshape((self.natom, 3))", "def flatten_fish_positions(self) -> np.ndarray:\n # todo: re-implement fish-index based slicing\n df_sel = self.positions_df[['x_pos', 'y_pos']]\n return df_sel.to_numpy()", "def get_positions(grofile, trajfile, atom_range):\n\n\n\n # traj = md.load_trr(trr, top=gro)\n traj = md.load(trajfile, top=grofile)\n\n # positions = []\n # u = MDAnalysis.Universe(gro, trr)\n # for frame in u.trajectory:\n # positions+= [frame.positions]\n # positions = np.array(positions)[:,atom_range[0]:atom_range[1]]\n \n \n # residues = traj.topology.residues\n\n # atoms = list(traj.topology.atoms) #[:num_atoms*nmol]\n \n # atoms = np.compress([not ('-W' in str(atom)) and not ('ION' in str(atom)) for atom in atoms], atoms, axis=0)\n \n\n positions = traj.xyz[:,atom_range[0]:atom_range[1]]\n\n\n return positions", "def returnGridAsArrayOfCoordinates(self):\n returnCoordinates = self.__returnCoordinatesReshaped((self.gridContainer['gridLength'], self.nVar))\n\n return returnCoordinates", "def _getPtychographyPositions(self) -> np.ndarray:\n\n p1 = self._scan_params.scan_area_buffer_npix\n p2 = self._probe_params.npix - p1 - self._obj_params.obj_w_border_npix\n positions_x = np.arange(p1, p2, self._scan_params.scan_step_npix)\n positions = []\n\n for r in positions_x:\n for c in positions_x:\n positions.append([r,c])\n return np.array(positions)", "def coordinates(self):\n\n nAtoms = len(self.atoms)\n coordinates = np.zeros((nAtoms,3))\n for i in range(nAtoms):\n coordinates[i,:] = self.atoms[i].coordinates()\n return coordinates", "def returnGridAsArrayOfCoordinates(cls):", "def get_occupied_positions(self):\n positions = []\n for x in range(self.width):\n for y in range(self.height):\n if self.isOccupied((x,y)):\n positions.append((x,y))\n return positions", "def calculate_vertices(self):\n scale = self.__scale\n array = [\n (0 * scale, -1 / math.sqrt(3) * scale, 0 * scale),\n (0.5 * scale, 1 / (2 * math.sqrt(3)) * scale, 0 * scale),\n (-0.5 * scale, 1 / (2 * math.sqrt(3)) * scale, 0 * scale),\n (0 * scale, 0 * scale, math.sqrt(2 / 3) * scale)\n ]\n return array", "def get_vertices(self):\n\n if not self.sim_ref:\n from numpy import random\n fake_data = random.rand(10, 3)\n for d in fake_data:\n yield nparray([d[0], d[1], d[2], 1])", "def dimension_positions(self):\n return [dim.position for dim in self]", "def grain_offset(self) -> np.ndarray:\n return (\n (self.grain_0.thickness + self.translation_vec[2])\n / self.lattice.matrix[2, 2]\n * self.lattice.matrix[2]\n )", "def positions(self):\n x_positions, y_positions = self.positions_1d\n return numpy.meshgrid(x_positions, y_positions)", "def _get_positions(self):\n positions = ''\n for rotor in self.rotors:\n positions += rotor.get_position()\n return positions", "def position(self):\n\t\t\n\t\tx_all,y_all,z_all = list(),list(),list()\n\t\tfor ant in self.antennas:\n\t\t\tx,y,z = ant.position\n\t\t\tx_all.append(x)\n\t\t\ty_all.append(y)\n\t\t\tz_all.append(z)\n\t\t\n\t\treturn (x_all,y_all,z_all)", "def __make_position_list(self):\n res = list()\n for i in range(self.board_size):\n for j in range(self.board_size):\n res.append((i, j))\n np.random.shuffle(res)\n return res", "def listof_positions(self):\n l = []\n for rnum in range(self.rnum_min, self.rnum_max+1):\n if rnum in self._pieces:\n l.append(rnum)\n return l", "def geometric_rows(self):\n return self.info.detector_array.spaxel_rows", "def get_coords(self):\n\t\treturn self.x, self.y, self.z" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute all grain volume fractions.
def get_grain_volume_fractions(self):
    total_volume = 0.
    for g in self.grains:
        total_volume += g.volume
    return [g.get_volume_fraction(total_volume) for g in self.grains]
[ "def get_grain_volume_fraction(self, gid, use_total_volume_value=None):\n # compute the total volume\n if use_total_volume_value:\n volume = use_total_volume_value\n else:\n # sum all the grain volume to compute the total volume\n volume = 0.\n for g in self.grains:\n volume += g.volume\n return self.get_grain(gid).get_volume_fraction(volume)", "def volume_fraction(self, x):\r\n\t\tvol = np.mean(x)\r\n\t\tself.dv[:] = 1.0 / (self.nelx * self.nely)\r\n\t\treturn vol, self.dv", "def calculate_volume(self):\r\n \r\n # Calculate the volume\r\n self.volume = math.pi * self.radius**2 * self.height", "def volume(self) -> float:\n return sum(block.volume for block in self if block)", "def volume(self):\n\t\treturn (4/3) * PI * power(self.r, 3)", "def volume(self):\n return 4 / 3 * math.pi * (self.radius ** 3)", "def volume(self):\n volume = []\n for dv in (self.red, self.green, self.blue, self.alpha):\n if dv.volume.dtype != np.uint8:\n vol = dv.volume.astype(\"float32\", copy=True)\n if dv.vmin is None:\n if vol.min() < 0:\n vol -= vol.min()\n else:\n vol -= dv.vmin\n\n if dv.vmax is None:\n if vol.max() > 1:\n vol /= vol.max()\n else:\n vol /= dv.vmax - dv.vmin\n\n vol = (np.clip(vol, 0, 1) * 255).astype(np.uint8)\n else:\n vol = dv.volume.copy()\n volume.append(vol)\n\n return np.array(volume).transpose([1, 2, 3, 4, 0])", "def total_volume(self) :\n total_vol = 0\n all_vols = []\n # upated 2014-01-21 for compatibility with new btstructs2\n for node in self._all_nodes :\n n = node.content['p3d']\n if not node.index in (1,2,3) :\n p = node.parent.content['p3d']\n H = np.sqrt(np.sum((n.xyz-p.xyz)**2))\n vol = np.pi*n.radius*n.radius*H\n all_vols.append(vol)\n total_vol = total_vol + vol\n return total_vol, all_vols", "def calc_volume(self:Tensor):\n x,y,z = 1,1,1\n voxel_size = x*y*z\n self.volume = {'background': self._calc_vol_per_class(0, voxel_size)}\n self.volume['total_mask_volume'] = self.size(0)*self.size(1)*self.size(2)*voxel_size - self.volume['background']\n for c in self.unique()[1:]:\n name = 'class '+str(int(c))\n self.volume[name] = self._calc_vol_per_class(c, voxel_size)\n #print(self.volume)\n return self.volume[\"class 1\"]", "def get_volume_fraction(input_params):\n\n vol_A =input_params['N_A']*(estimate_chain_volume(input_params['seq_A'],input_params))/float(pow(input_params['L'],3));\n vol_B = input_params['N_B']*(estimate_chain_volume(input_params['seq_B'],input_params))/float(pow(input_params['L'],3));\n vol_C = input_params['N_C']*(estimate_chain_volume(input_params['seq_C'],input_params))/float(pow(input_params['L'],3));\n\n return (vol_A,vol_B,vol_C)", "def get_volume(self):\n\t\treturn 2 * power(PI, 2) * self.b * self.c * self.r", "def multiple_spheres_volume(radius: float, num_spheres: int) -> float:\n\n #Your code here", "def average_volume(self):\n volumes = []\n for i in self._klines:\n volumes.append(int(i.get(\"volume\")))\n return float(np.mean(volumes))", "def get_frac_in_states(self):\n nested = [t.get_states() for t in self.thick]\n xb_states = [xb for fil in nested for face in fil for xb in face]\n num_in_state = [xb_states.count(state) for state in range(3)]\n frac_in_state = [n / float(len(xb_states)) for n in num_in_state]\n return frac_in_state", "def volume_average(self, name):\r\n # Check for precomputed integral\r\n try:\r\n integral_name = '_{}_integral'.format(name)\r\n integral_field = self.properties[integral_name]\r\n except KeyError:\r\n # Compute volume integral\r\n field = self.properties[name]\r\n integral_op = operators.integrate(field)\r\n 
integral_field = integral_op.evaluate()\r\n # Communicate integral value to all processes\r\n integral_value = self.reducer.global_max(integral_field['g'])\r\n average_value = integral_value / self.solver.domain.hypervolume\r\n return average_value", "def phase_space_volume(self) -> float:", "def volume(self):\n if self._volume is None:\n i = self._vertices[2, :] - self._vertices[1, :]\n j = self._vertices[3, :] - self._vertices[1, :]\n k = self._vertices[5, :] - self._vertices[1, :]\n sys = np.array([i, j, k])\n self._volume = abs(np.linalg.det(sys))\n return self._volume", "def get_volume(self):\n\t\treturn abs(inner(cross(self.a, self.b), self.h))/2", "def to_volume(slices):\n volume = np.stack([s.pixel_array for s in slices])\n volume = volume.astype(np.int16)\n\n # Set outside-of-scan pixels to 0\n # The intercept is usually -1024, so air is approximately 0\n volume[volume == -2000] = 0\n\n # Convert to Hounsfield units (HU)\n for n in range(len(slices)):\n intercept = slices[n].RescaleIntercept\n slope = slices[n].RescaleSlope\n if slope != 1:\n volume[n] = slope * volume[n].astype(np.float64)\n volume[n] = volume[n].astype(np.int16)\n volume[n] += np.int16(intercept)\n\n volume = np.array(volume, dtype=np.int16)\n spacing = tuple(map(float, ([slices[0].SliceThickness] + slices[0].PixelSpacing)))\n return volume, spacing" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the grain volume fraction.
def get_grain_volume_fraction(self, gid, use_total_volume_value=None):
    # compute the total volume
    if use_total_volume_value:
        volume = use_total_volume_value
    else:
        # sum all the grain volume to compute the total volume
        volume = 0.
        for g in self.grains:
            volume += g.volume
    return self.get_grain(gid).get_volume_fraction(volume)
[ "def get_grain_volume_fractions(self):\n total_volume = 0.\n for g in self.grains:\n total_volume += g.volume\n return [g.get_volume_fraction(total_volume) for g in self.grains]", "def volume_fraction(self, x):\r\n\t\tvol = np.mean(x)\r\n\t\tself.dv[:] = 1.0 / (self.nelx * self.nely)\r\n\t\treturn vol, self.dv", "def volume(self):\n\t\treturn (4/3) * PI * power(self.r, 3)", "def volume(self):\n return 4 / 3 * math.pi * (self.radius ** 3)", "def calculate_volume(self):\r\n \r\n # Calculate the volume\r\n self.volume = math.pi * self.radius**2 * self.height", "def phase_space_volume(self) -> float:", "def get_volume(self):\n\t\treturn 2 * power(PI, 2) * self.b * self.c * self.r", "def get_volume(self):\n\t\treturn abs(inner(cross(self.a, self.b), self.h))/2", "def volume(self) -> float:\n return sum(block.volume for block in self if block)", "def pore_volume_conversion(self):\n pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor) /\\\n (self.__reader.ylen * self.resolution)\n return pv_factor", "def volume(self):\n if self._volume is None:\n i = self._vertices[2, :] - self._vertices[1, :]\n j = self._vertices[3, :] - self._vertices[1, :]\n k = self._vertices[5, :] - self._vertices[1, :]\n sys = np.array([i, j, k])\n self._volume = abs(np.linalg.det(sys))\n return self._volume", "def volume_final(self) -> float:\n return round(self.concentration_initial * self.parameters.volume_initial / self.parameters.concentration_final, 2)", "def calcular_volume_esfera(r):\n volume = 4/3 * PI * (r ** 3)\n return volume", "def volume_level(self):\n return .5", "def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):\n # The code of this function is extracted from PySpark RDD counterpart at\n # https://spark.apache.org/docs/1.5.0/api/python/_modules/pyspark/rdd.html\n #\n # Licensed to the Apache Software Foundation (ASF) under one or more\n # contributor license agreements. See the NOTICE file distributed with\n # this work for additional information regarding copyright ownership.\n # The ASF licenses this file to You under the Apache License, Version 2.0\n # (the \"License\"); you may not use this file except in compliance with\n # the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n #\n fraction = float(sampleSizeLowerBound) / total\n if withReplacement:\n numStDev = 5\n if sampleSizeLowerBound < 12:\n numStDev = 9\n return fraction + numStDev * math.sqrt(fraction / total)\n\n delta = 0.00005\n gamma = -math.log(delta) / total\n return min(1, fraction + gamma + math.sqrt(gamma * gamma + 2 * gamma * fraction))", "def pyramid_volume(base, height):\n return 1.0/3.0 * base * height", "def getVolume(self) -> \"float\":\n return _coin.SbBox3f_getVolume(self)", "def volume(self):\n return self.intrinsicValue(\"measuredvolume\")", "def get_fracture(self):\n return self.__fraction_alone" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Match grains from a second microstructure to this microstructure. This function tries to find pairs of grains based on their orientations.
def match_grains(self, micro2, mis_tol=1, use_grain_ids=None, verbose=False):
    if not self.get_lattice().get_symmetry() == micro2.get_lattice().get_symmetry():
        raise ValueError('warning, microstructure should have the same symmetry, got: {} and {}'.
                         format(self.get_lattice().get_symmetry(), micro2.get_lattice().get_symmetry()))
    candidates = []
    matched = []
    unmatched = []  # grain that were not matched within the given tolerance
    # restrict the grain ids to match if needed
    if use_grain_ids:
        grains_to_match = [self.get_grain(gid) for gid in use_grain_ids]
    else:
        grains_to_match = self.grains
    # look at each grain
    for i, g1 in enumerate(grains_to_match):
        cands_for_g1 = []
        best_mis = mis_tol
        best_match = -1
        for g2 in micro2.grains:
            # compute disorientation
            mis, _, _ = g1.orientation.disorientation(g2.orientation, crystal_structure=self.get_lattice().get_symmetry())
            misd = np.degrees(mis)
            if misd < mis_tol:
                if verbose:
                    print('grain %3d -- candidate: %3d, misorientation: %.2f deg' % (g1.id, g2.id, misd))
                # add this grain to the list of candidates
                cands_for_g1.append(g2.id)
                if misd < best_mis:
                    best_mis = misd
                    best_match = g2.id
        # add our best match or mark this grain as unmatched
        if best_match > 0:
            matched.append([g1.id, best_match])
        else:
            unmatched.append(g1.id)
        candidates.append(cands_for_g1)
    if verbose:
        print('done with matching')
        print('%d/%d grains were matched ' % (len(matched), len(grains_to_match)))
    return matched, candidates, unmatched
[ "def _isomorphism(g1, g2, top1, top2) -> Dict[str, str]:\n _iso_inv_map(g1)\n _iso_inv_map(g2)\n\n hypothesis: Dict[str, str] = {}\n agenda: List[Tuple[str, str]] = next(\n _iso_candidates({top1: None}, {top2: None}, g1, g2, hypothesis),\n [])\n return next(_iso_vf2(hypothesis, g1, g2, agenda), {})", "def identifyIndividual_4d(self, read1_seq, read2_seq):\n assigned = False\n ind = None\n idx1 = 0\n idx2 = 0\n AssignedPairsTwoTags = 0\n AssignedPairsOneTag = 0\n chimera = False\n \n lTagIds = self.patterns.keys()\n lTagIds.sort()\n for i in range(0, len(lTagIds)): # for each tag\n tagId = lTagIds[i]\n tmpRe1 = self.patterns[tagId].search(read1_seq[:self.dist])\n tmpRe2 = self.patterns[tagId].search(read2_seq[:self.dist])\n \n if tmpRe1 != None: # if first read has the pattern\n ind = tagId\n idx1 = tmpRe1.end() - self.lenRemainMotif\n if tmpRe2 != None: # if second read also has the pattern\n assigned = True\n idx2 = tmpRe2.end() - self.lenRemainMotif\n AssignedPairsTwoTags += 1\n else: # if only the first read has the pattern\n for tagId2 in self.patterns:\n if tagId2 == tagId:\n continue\n tmpRe2chim = self.patterns[tagId2].search(read2_seq[:self.dist])\n if tmpRe2chim != None: # if the first and second read have different tags \n chimera = True\n break\n if not chimera:\n assigned = True\n idx2 = idx1\n AssignedPairsOneTag += 1\n break\n \n elif tmpRe2 != None: # if only the second read has the pattern\n ind = tagId\n idx2 = tmpRe2.end() - self.lenRemainMotif\n for j in range(i+1, len(lTagIds)):\n tagId2 = lTagIds[j]\n tmpRe1chim = self.patterns[tagId2].search(read1_seq[:self.dist])\n if tmpRe1chim != None: # if the first and second read have different tags\n chimera = True\n break\n if not chimera:\n assigned = True\n AssignedPairsOneTag += 1\n idx1 = idx2\n break\n \n if not self.clipIdx:\n idx1 = idx2 = 0\n \n return assigned, tagId, idx1, idx2, AssignedPairsTwoTags, AssignedPairsOneTag, chimera", "def match(raster_in1,raster_in2):\n # Read rasterdata\n raster1 = gdal.Open(raster_in1)\n raster2 = gdal.Open(raster_in2)\n \n # load data\n band1 = raster1.GetRasterBand(1)\n band2 = raster2.GetRasterBand(1)\n gt1 = raster1.GetGeoTransform()\n gt2 = raster2.GetGeoTransform()\n \n # find each image's bounding box\n # r1 has left, top, right, bottom of dataset's bounds in geospatial coordinates.\n r1 = [gt1[0], gt1[3], gt1[0] + (gt1[1] * raster1.RasterXSize), gt1[3] + (gt1[5] * raster1.RasterYSize)]\n r2 = [gt2[0], gt2[3], gt2[0] + (gt2[1] * raster2.RasterXSize), gt2[3] + (gt2[5] * raster2.RasterYSize)]\n print('\\t1 bounding box: %s' % str(r1))\n print('\\t2 bounding box: %s' % str(r2))\n \n # find intersection between bounding boxes\n intersection = [max(r1[0], r2[0]), min(r1[1], r2[1]), min(r1[2], r2[2]), max(r1[3], r2[3])]\n if r1 != r2:\n print('\\t** different bounding boxes **')\n # check for any overlap at all...\n if (intersection[2] < intersection[0]) or (intersection[1] < intersection[3]):\n intersection = None\n print('\\t***no overlap***')\n return\n else:\n print('\\tintersection:',intersection)\n left1 = int(round((intersection[0]-r1[0])/gt1[1])) # difference divided by pixel dimension\n top1 = int(round((intersection[1]-r1[1])/gt1[5]))\n col1 = int(round((intersection[2]-r1[0])/gt1[1])) - left1 # difference minus offset left\n row1 = int(round((intersection[3]-r1[1])/gt1[5])) - top1\n \n left2 = int(round((intersection[0]-r2[0])/gt2[1])) # difference divided by pixel dimension\n top2 = int(round((intersection[1]-r2[1])/gt2[5]))\n col2 = 
int(round((intersection[2]-r2[0])/gt2[1])) - left2 # difference minus new left offset\n row2 = int(round((intersection[3]-r2[1])/gt2[5])) - top2\n \n #print '\\tcol1:',col1,'row1:',row1,'col2:',col2,'row2:',row2\n if col1 != col2 or row1 != row2:\n print(\"ERROR: Columns and rows still do not match! ***\")\n # these arrays should now have the same spatial geometry though NaNs may differ\n array1 = band1.ReadAsArray(left1,top1,col1,row1)\n array2 = band2.ReadAsArray(left2,top2,col2,row2)\n\n else: # same dimensions from the get go\n col1 = raster1.RasterXSize # = col2\n row1 = raster1.RasterYSize # = row2\n array1 = band1.ReadAsArray()\n array2 = band2.ReadAsArray()\n \n return array1, array2, intersection", "def match_spectra_to_base_and_merge_duplicates(specs, base, debug=None, matching_order=None):\n\n if \"coord\" in specs.colnames:\n del specs[\"coord\"] # because \"coord\" breaks \"sort\"\n specs.sort([\"ZQUALITY_sort_key\", \"SPEC_Z\"])\n\n specs = add_skycoord(specs)\n base = add_skycoord(base)\n specs_idx, base_idx, sep, _ = search_around_sky(\n specs[\"coord\"],\n base[\"coord\"],\n 20.0 * astropy.units.arcsec, # pylint: disable=E1101\n )\n sep = sep.arcsec\n\n # in case future astropy does not preserve the order of `specs_idx`\n if (np.ediff1d(specs_idx) < 0).any():\n sorter = specs_idx.argsort()\n specs_idx = specs_idx[sorter]\n base_idx = base_idx[sorter]\n sep = sep[sorter]\n del sorter\n\n # matched_idx will store the index of the matched photo obj.\n specs[\"matched_idx\"] = -1\n\n matching_order = matching_order or SPEC_MATCHING_ORDER\n\n if len(specs_idx):\n for group_slice in group_by(specs_idx, True):\n spec_idx_this = specs_idx[group_slice.start]\n possible_match = base[base_idx[group_slice]]\n possible_match[\"sep\"] = sep[group_slice]\n possible_match[\"sep_norm\"] = calc_normalized_dist(\n specs[\"RA\"][spec_idx_this],\n specs[\"DEC\"][spec_idx_this],\n possible_match[\"RA\"],\n possible_match[\"DEC\"],\n possible_match[\"radius_for_match\"],\n possible_match[\"ba\"] if \"ba\" in possible_match.colnames else None,\n possible_match[\"phi\"] if \"phi\" in possible_match.colnames else None,\n multiplier=1,\n )\n possible_match[\"SPEC_Z\"] = specs[\"SPEC_Z\"][spec_idx_this]\n\n # using following criteria one by one to find matching photo obj, stop when found\n for q, sorter in matching_order:\n mask = q.mask(possible_match)\n if mask.any():\n possible_match_this = possible_match[mask]\n matched_base_idx = possible_match_this[\"index\"][possible_match_this[sorter].argmin()]\n specs[\"matched_idx\"][spec_idx_this] = matched_base_idx\n break\n\n # now each photo obj can potentially have more than one spec matched to it\n # so for each photo obj that has one or more specs, we will merge the specs\n\n if \"coord\" in specs.colnames:\n del specs[\"coord\"]\n specs.sort([\"matched_idx\", \"ZQUALITY_sort_key\", \"SPEC_Z\"])\n\n specs[\"index\"] = np.arange(len(specs))\n specs[\"SPEC_REPEAT\"] = get_empty_str_array(len(specs), 48)\n specs[\"SPEC_REPEAT_ALL\"] = get_empty_str_array(len(specs), 48)\n specs[\"OBJ_NSAID\"] = np.int32(-1)\n specs[\"chosen\"] = False\n\n def get_tel_rank(\n tel,\n ranks=(\"MMT\", \"AAT\", \"PAL\", \"BINO\", \"NSA\", \"_OTHERS\", \"SDSS\", \"ALFALF\", \"WIYN\"),\n ):\n try:\n return ranks.index(tel)\n except ValueError:\n return ranks.index(\"_OTHERS\")\n\n for group_slice in group_by(specs[\"matched_idx\"], True):\n # matched_idx < 0 means there is no match, so nothing to do\n if specs[\"matched_idx\"][group_slice.start] < 0:\n continue\n\n # stop - 
start == 1 means there is only one match, so it's easy\n if group_slice.stop - group_slice.start == 1:\n i = group_slice.start\n specs[\"chosen\"][i] = True\n specs[\"SPEC_REPEAT\"][i] = specs[\"TELNAME\"][i]\n specs[\"SPEC_REPEAT_ALL\"][i] = specs[\"TELNAME\"][i]\n if specs[\"TELNAME\"][i] == \"NSA\":\n specs[\"OBJ_NSAID\"][i] = int(specs[\"SPECOBJID\"][i])\n continue\n\n # now it's the real thing, we have more than one specs\n # we design a rank for each spec, using ZQUALITY, TELNAME, and SPEC_Z_ERR\n specs_to_merge = specs[group_slice]\n rank = np.fromiter(map(get_tel_rank, specs_to_merge[\"TELNAME\"]), np.int32, len(specs_to_merge))\n rank += (10 - specs_to_merge[\"ZQUALITY\"]) * (rank.max() + 1)\n rank = rank.astype(np.float32) + np.where(\n Query((np.isfinite, \"SPEC_Z_ERR\"), \"SPEC_Z_ERR > 0\", \"SPEC_Z_ERR < 1\").mask(specs_to_merge),\n specs_to_merge[\"SPEC_Z_ERR\"],\n 0.99999,\n )\n specs_to_merge = specs_to_merge[rank.argsort()]\n best_spec = specs_to_merge[0]\n\n # We now check if there is any spec that is not at the same redshift as the best spec (mask_within_dz).\n # If so, and those specs are good or as good as the best spec (mask_same_zq_class),\n # and those specs are at least 0.5 arcsec away (mask_coord_offset),\n # then, we push them out of this merge process (to_rematch).\n mask_within_dz = np.fabs(specs_to_merge[\"SPEC_Z\"] - best_spec[\"SPEC_Z\"]) < 150.0 / SPEED_OF_LIGHT\n\n mask_same_zq_class = (specs_to_merge[\"ZQUALITY_sort_key\"] == best_spec[\"ZQUALITY_sort_key\"]) | (\n specs_to_merge[\"ZQUALITY\"] >= 3\n )\n\n mask_coord_offset = (\n SkyCoord(specs_to_merge[\"RA\"], specs_to_merge[\"DEC\"], unit=\"deg\")\n .separation(SkyCoord(best_spec[\"RA\"], best_spec[\"DEC\"], unit=\"deg\"))\n .arcsec\n > 0.5\n )\n\n if ((~mask_within_dz) & mask_coord_offset & mask_same_zq_class).any():\n to_rematch = (~mask_within_dz) & mask_coord_offset\n specs[\"matched_idx\"][specs_to_merge[\"index\"][to_rematch]] = -2 # we will deal with these -2 later\n specs_to_merge = specs_to_merge[~to_rematch]\n mask_same_zq_class = mask_same_zq_class[~to_rematch]\n mask_within_dz = mask_within_dz[~to_rematch]\n\n # so now specs_to_merge has specs that are ok to merge\n # we need to find if there's NSA objects and also get SPEC_REPEAT and put those info on best spec\n best_spec_index = best_spec[\"index\"]\n specs[\"chosen\"][best_spec_index] = True\n specs[\"SPEC_REPEAT\"][best_spec_index] = \"+\".join(\n set(specs_to_merge[\"TELNAME\"][mask_within_dz & mask_same_zq_class])\n )\n specs[\"SPEC_REPEAT_ALL\"][best_spec_index] = \"+\".join(set(specs_to_merge[\"TELNAME\"]))\n\n nsa_specs = specs_to_merge[specs_to_merge[\"TELNAME\"] == \"NSA\"]\n specs[\"OBJ_NSAID\"][best_spec_index] = int(nsa_specs[\"SPECOBJID\"][0]) if len(nsa_specs) else -1\n if len(nsa_specs) > 1:\n logging.warning(\n \"More than one NSA obj near ({}, {}): {}\".format(\n nsa_specs[\"RA\"][0],\n nsa_specs[\"DEC\"][0],\n \", \".join(nsa_specs[\"SPECOBJID\"]),\n )\n )\n\n # print out warnings for unmatched good specs\n for spec in Query(\"matched_idx == -1\", \"ZQUALITY >= 3\").filter(specs):\n if spec[\"TELNAME\"] in (\n \"AAT\",\n \"MMT\",\n \"BINO\",\n \"IMACS\",\n \"WIYN\",\n \"NSA\",\n \"PAL\",\n \"SALT\",\n ):\n logging.warning(\n \"No photo obj matched to {0[TELNAME]} spec {0[MASKNAME]} {0[SPECOBJID]} ({0[RA]}, {0[DEC]})\".format(\n spec\n )\n )\n\n if debug is not None:\n for i in count():\n key = \"specs_matching_{}\".format(i)\n if key not in debug:\n debug[key] = specs.copy()\n break\n\n # return both matched specs and 
specs that need to be rematched (those -2's)\n return Query(\"chosen\").filter(specs), Query(\"matched_idx == -2\").filter(specs)", "def connect_regions(self) -> None:\n for i, room in enumerate(self.rooms):\n x, y = room.center\n connector_regions = {}\n for y in range(0, self.current_map_height - 1):\n for x in range(0, self.current_map_width - 1):\n if self.current_map[x][y].block_path:\n regions = set()\n for direction in DungeonGenerator.directions:\n dx, dy = direction\n region = self.regions[x + dx][y + dy]\n if region != 0:\n regions.add(region)\n if len(regions) >= 2:\n connector_regions[(x, y)] = regions\n\n connectors = list(connector_regions)\n merged = {}\n open_regions = set()\n for i in range(1, self.current_region + 1):\n merged[i] = i\n open_regions.add(i)\n\n while len(open_regions) > 1:\n if len(connectors) == 0:\n return\n connector = random.choice(connectors)\n self.add_junction(connector, self.tile_texture)\n\n regions = map(lambda i: merged[i], connector_regions[connector])\n dest = next(regions)\n sources = set(regions)\n if dest in sources:\n sources.remove(dest)\n\n for i in range(1, self.current_region + 1):\n if merged[i] in sources:\n merged[i] = dest\n\n for source in sources:\n if source in open_regions:\n open_regions.remove(source)\n\n for conn in connectors:\n c_x, c_y = conn\n x, y = connector\n regions = set(map(lambda reg: merged[reg], connector_regions[(c_x, c_y)]))\n\n if abs(c_x - x) < 2 and abs(c_y - y) < 2:\n connectors.remove(conn)\n continue\n\n if len(regions) > 1:\n continue\n\n if random.randint(1, 100) < self.extra_connector_chance:\n self.add_junction(conn, self.tile_texture)\n\n if conn in connectors:\n connectors.remove(conn)", "def twomass_crossmatch(gaia_cat, gaia_2mass, gaia_2mass_crossref, twomass_cat):\n ntable1 = len(gaia_cat['ra'])\n ntable2 = len(gaia_2mass['ra'])\n ntable3 = len(gaia_2mass_crossref['ra'])\n ntable4 = len(twomass_cat['ra'])\n ngaia2mass = np.zeros((ntable2), dtype=np.int16) - 10\n ngaia2masscr = np.zeros((ntable4), dtype=np.int16) - 10\n for loop in range(ntable2):\n # find the number of entries of each 2MASS source in the cross references\n nmatch = 0\n namematch = []\n match1 = []\n for l1 in range(ntable3):\n if gaia_2mass['DESIGNATION'][loop] == gaia_2mass_crossref['DESIGNATION'][l1]:\n nmatch = nmatch + 1\n namematch.append(gaia_2mass_crossref['designation'][l1])\n match1.append(loop)\n # Find the matching GAIA sources and select the one with the best\n # magnitude match within a radius of 0.3 arc-seconds of the 2MASS\n # position.\n magkeys = ['j_m', 'h_m', 'ks_m']\n if nmatch > 0:\n mindelm = 10000.0\n ncross = -10\n for l1 in range(nmatch):\n for l2 in range(ntable1):\n gmag = 0.\n irmag = -10000.0\n if gaia_cat['DESIGNATION'][l2] == namematch[l1]:\n ra1 = gaia_cat['ra'][l2]\n dec1 = gaia_cat['dec'][l2]\n ra2 = gaia_2mass['ra'][match1[l1]]\n dec2 = gaia_2mass['dec'][match1[l1]]\n p1 = SkyCoord(ra1*u.deg, dec1*u.deg)\n p2 = SkyCoord(ra2*u.deg, dec2*u.deg)\n if p2.separation(p1).arcsec < 0.5:\n gmag = gaia_cat['phot_g_mean_mag'][l2]\n # select 2MASS magnitude: first ph_qual = A or if none\n # is of quality A the first ph_qual = B or if none is\n # of quality A or B then the first non U value.\n magval = gaia_2mass['ph_qual'][match1[l1]]\n if isinstance(magval, str):\n qual = magval[0:3]\n else:\n qual = magval.decode()[0:3]\n\n if (irmag < -100.):\n a_pos = qual.find('A')\n if a_pos != -1:\n irmag = gaia_2mass[magkeys[a_pos]][match1[l1]]\n else:\n b_pos = qual.find('B')\n if b_pos != -1:\n irmag = 
gaia_2mass[magkeys[b_pos]][match1[l1]]\n else:\n non_u_pos = re.search(r'[^U]', qual)\n if non_u_pos is not None:\n irmag = gaia_2mass[magkeys[non_u_pos.start()]][match1[l1]]\n\n delm = gmag - irmag\n if (delm > -1.2) and (delm < 30.0):\n if delm < mindelm:\n ncross = l2\n mindelm = delm\n ngaia2mass[loop] = ncross\n # Now locate the 2MASS sources in the IPAC 2MASS table, and put in the\n # index values.\n for loop in range(ntable4):\n for n1 in range(ntable2):\n if twomass_cat['designation'][loop] == gaia_2mass['DESIGNATION'][n1]:\n ngaia2masscr[loop] = ngaia2mass[n1]\n return ngaia2masscr, ngaia2mass", "def find_segues(g1, g2, pre_filtering=pre, post_filtering=post, nodes_types_to_filter=nodes_types_to_filter_strict, nodes_types_to_segue_not_equal=get_dict()):\n # First, merges g1 and g2 in a unique graph.\n # Then, find segues as paths from the source node of g1 and the source node of g2.\n # Then, filters out undesired nodes\n # Finally, converts paths to the dictionary form.\n\n # Efficient structure where to store the merged graph\n g = defaultdict(set)\n\n # Map back a tuple of nodes ids in g to a list of nodes in g1 (dictionary 0) and g2 (dictionary 1)\n # A series of identical nodes in g can be mapped to more nodes in one of the starting graphs, we are in a multigraph scenario.\n map_back = {'g1': {}, 'g2': {}}\n\n # Tells whether an edge in g was from g1 or g2 or\n # if it was induced, i.e. resulting from the application of a compare functio to nodes from g1 and g2\n edges = {'g1': set(), 'g2': set(), 'induced': set()}\n\n # An induced edge is added as the result of the application of a compare function to two nodes\n # In induced_edges_infos we store these information\n induced_edges_infos = defaultdict(list)\n\n # Here we merge graphs\n\n # Every node in g1 and g2 is represented by a string, which is the conversion of its fields to text (mergiable_id)\n # This automatically implements the equal compare function, as equal nodes will converge into the same node in g\n for idx, addend in enumerate((g1, g2)):\n id_sub_graph = f\"source_{idx}\"\n stack = [((f\"source_{idx}\",), iter(addend['source']))]\n while stack:\n children = stack[-1]\n child = next(children[1], None)\n if child is None:\n stack.pop()\n else:\n child_id = addend.nodes()[child]['mergiable_id']\n child_id += f\"__{idx}\" if addend.nodes()[child]['type'] in nodes_types_to_filter else \"\"\n\n if idx == 0:\n g[children[0][-1]].add(child_id)\n edges['g1'].add((children[0][-1], child_id))\n else:\n g[child_id].add(children[0][-1])\n edges['g2'].add((child_id, children[0][-1]))\n\n key = children[0]+(child_id,)\n if key in map_back[f'g{idx+1}']:\n map_back[f'g{idx+1}'][key].append(child)\n else:\n map_back[f'g{idx+1}'][key] = [child]\n\n stack.append((children[0]+(child_id,), iter(addend[child])))\n\n # Now we add edges stemming for compare functions different from equal\n compareble_nodes_without_equal = [k for k, v in nodes_types_to_segue_not_equal.items()]\n # Every key in d is a tuple of types, so broadcasting to type_1 and type_2\n for type_1, type_2 in compareble_nodes_without_equal:\n\n nodes_type_1 = [g1.nodes()[node_id] for node_id in g1.nodes() if g1.nodes()[node_id]['type'] == type_1]\n nodes_type_2 = [g2.nodes()[node_id] for node_id in g2.nodes() if g2.nodes()[node_id]['type'] == type_2]\n\n for compare_function in [f for f in d[(type_1, type_2)] if f.__name__ != 'equal']:\n\n nodes_type_1_filtered = [n for n in nodes_type_1 if pre(n, compare_function)]\n nodes_type_2_filtered = [n for n in nodes_type_2 if 
pre(n, compare_function)]\n\n for n1, n2 in itertools.product(nodes_type_1_filtered, nodes_type_2_filtered):\n result = compare_function(n1, n2)\n if result['outcome'] == True:\n\n # Add the edge\n id_1 = f\"{n1['mergiable_id']}__0\" if n1['type'] not in compareble_nodes_with_equal else n1['mergiable_id']\n id_2 = f\"{n2['mergiable_id']}__1\" if n2['type'] not in compareble_nodes_with_equal else n2['mergiable_id']\n g[id_1].add(id_2)\n edges['induced'].add((id_1, id_2))\n\n # Store the result of the compare function application in a dictionary\n result.pop('outcome')\n result['compare_function'] = compare_function.__name__\n induced_edges_infos[(n1['id'], n2['id'])].append(result)\n\n # Find paths in graph\n paths = list(_all_simple_paths_graph(g, 'source_0', {'source_1'}, 50))\n\n # Convert paths to dictionary-shaped segues\n segues = []\n\n # Find out which is the last node that belongs to g1 and which is the first that belongs to g2\n # middle_leg is len==2 tuple which has as values such information\n for j, path in enumerate(paths):\n for idx in range(2, len(path)):\n if tuple(path[:idx]) not in map_back['g1']:\n idx = idx-2\n middle_leg = (path[idx], path[idx+1])\n break\n\n if (tuple(path[idx:][::-1]) in map_back['g2']):\n # Compare function == equal\n for id_1, id_2 in itertools.product(map_back['g1'][tuple(path[0:idx+1])], map_back['g2'][tuple(path[idx:][::-1])]):\n\n segue = {'n1': g1._node[id_1],\n 'n2': g2._node[id_2],\n 'value': g1._node[id_1]['value'],\n 'compare_function': 'equal'}\n\n if check_filters(segue, pre_filtering, post_filtering) == True:\n segues.append(segue)\n\n elif middle_leg in edges['induced']:\n # Compare function != equal\n for id_1, id_2 in itertools.product(map_back['g1'][tuple(path[0:idx+1])], map_back['g2'][tuple(path[idx+1:][::-1])]):\n\n candidated_segues = iter([{**{'n1': g1._node[id_1], 'n2': g2._node[id_2]}, **induced_edge_infos}\n for induced_edge_infos in induced_edges_infos[(id_1, id_2)]])\n\n for segue in candidated_segues:\n if check_filters(segue, pre_filtering, post_filtering) == True:\n segues.append(segue)\n\n else:\n # spurious path to be discarded, valid segues enter either the if or elif branch\n pass\n\n return segues", "def analyze_shapes(buildings):\n print 'Analyzing shape similarity with cv2.matchShapes...'\n # num_buildings = len(buildings)\n shape_sim = {}\n for i in xrange(num_buildings):\n for j in xrange(i+1, num_buildings):\n cnt1 = buildings[i]['cnt']\n cnt2 = buildings[j]['cnt']\n ret = cv2.matchShapes(cnt1,cnt2, 1,0.0)\n shape_sim[(i,j)] = ret\n sorted_sim = sorted([(value,key) for (key,value) in shape_sim.items()])\n for sim in sorted_sim[:40]:\n bldg1 = sim[1][0]\n bldg2 = sim[1][1]\n print round(sim[0],4), '\\t', buildings[bldg1]['name'], '&', buildings[bldg2]['name']", "def rotationMatch(im_fname=r\"chickenbroth_01.jpg\"):\n # Load image\n im1 = cv2.imread(str(DATA_FOLDER / im_fname))\n # Fully perform SIFT detection and BRIEF description\n locs1, desc1 = briefLite(im1)\n # Setting up rotation\n im_r, im_c, _ = im1.shape\n print(f\"Rotation match fo image {im_fname}:\")\n degs = np.arange(10 , 90 , 10)\n for deg in degs:\n # Rotating the image\n rot_mat = cv2.getRotationMatrix2D((im_c/2,im_r/2),deg,1)\n im2 = cv2.warpAffine(im1,rot_mat,(im_c,im_r))\n # Fully perform SIFT detection and BRIEF description\n locs2 , desc2 = briefLite(im2)\n # Compute matches using the provided briefMatch function\n matches = briefMatch(desc1, desc2, ratio=1)\n\n # Display matched points using the provided plotMatches function\n out = 
f\"Matches ({im_fname} at {deg} degrees), ratio={1}\"\n fig = plotMatchesJet(im1, im2, matches, locs1, locs2)\n fig.axes[0].set_title(out)\n fig.savefig(OUTPUT_FOLDER / f\"{out}_{deg}.png\", bbox_inches=\"tight\", pad_inches=0)\n plt.close()\n\n\n\n return None", "def alignImages(im1, im2):\n #adjust max_features and good_match_percents\n #print('Max features', MAX_FEATURES)\n #print('Using % good', GOOD_MATCH_PERCENT)\n #GOOD_MATCH_PERCENT = 0.15\n #print('Saved Homography:', H_SHORT)\n \n # Detect ORB features and compute descriptors.\n orb = cv2.ORB_create(MAX_FEATURES)\n keypoints1, descriptors1 = orb.detectAndCompute(im1, None)\n keypoints2, descriptors2 = orb.detectAndCompute(im2, None)\n\n # Match features.\n matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)\n try:\n matches = matcher.match(descriptors1, descriptors2, None)\n except:\n matches = None\n #print('could not match features')\n \n if matches is not None:\n # Sort matches by likelihood of being a match.\n matches.sort(key=lambda x: x.distance, reverse=False)\n\n # Remove not so good matches.\n numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)\n matches = matches[:numGoodMatches]\n\n # Draw top matches.\n imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)\n #cv2.imshow('Matches', imMatches)\n #cv2.waitKey(2000)\n cv2.imwrite(\"/home/pi/PlantHealth/SavedImages/matches.jpg\", imMatches)\n \n # Extract location of good matches.\n points1 = np.zeros((len(matches), 2), dtype=np.float32)\n points2 = np.zeros((len(matches), 2), dtype=np.float32)\n\n for i, match in enumerate(matches):\n points1[i, :] = keypoints1[match.queryIdx].pt\n points2[i, :] = keypoints2[match.trainIdx].pt\n\n # Find homography or use a stored version in case of emergency\n # or if the homography seems inaccurate.\n try:\n h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)\n except:\n h = H_SHORT #using stored homography\n #print('Using saved homography h:', h)\n finally:\n # Use homography.\n if h is None or not (np.allclose(h, H_SHORT, rtol=0, atol=0.5) \n or np.allclose(h, H, rtol=0, atol=0.5)):\n h = H_SHORT\n #print('Using saved homography h:', h)\n #print('Using homography h:', repr(h))\n height, width, channels = im2.shape\n im1Reg = cv2.warpPerspective(im1, h, (width, height))\n else:\n im1Reg = im1 #could not register the image\n return im1Reg", "def get_matching_subgraph_pairs(\n gm_a: GraphModule,\n gm_b: GraphModule,\n) -> Dict[str, Tuple[NSSubgraph, NSSubgraph]]:\n non_matchable_functions = get_non_matchable_functions()\n non_matchable_modules = get_non_matchable_modules()\n graph_a_iterator = _NSGraphMatchableSubgraphsIterator(\n gm_a, non_matchable_functions, non_matchable_modules)\n graph_b_iterator = _NSGraphMatchableSubgraphsIterator(\n gm_b, non_matchable_functions, non_matchable_modules)\n results = {}\n base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()\n type_a_related_to_b = \\\n get_type_a_related_to_b(base_name_to_sets_of_related_ops)\n\n existing_names_a: Set[str] = set()\n existing_names_b: Set[str] = set()\n\n while True:\n # fetch the next subgraphs from a and b\n cur_subgraph_a, cur_subgraph_b = None, None\n try:\n cur_subgraph_a = next(graph_a_iterator)\n except StopIteration:\n pass\n try:\n cur_subgraph_b = next(graph_b_iterator)\n except StopIteration:\n pass\n\n # look up types of a and b for useful error messages\n type_start_a, type_start_b = None, None\n if cur_subgraph_a is not None:\n type_start_a = 
_get_node_target_type(cur_subgraph_a.start_node, gm_a) # type: ignore\n if cur_subgraph_b is not None:\n type_start_b = _get_node_target_type(cur_subgraph_b.start_node, gm_b) # type: ignore\n\n # check for results and determine what to do next\n if cur_subgraph_a is not None and cur_subgraph_b is not None:\n # both nodes were fetched, check for subgraph_relationship\n # note: subgraph_relationship is checked on the start node, i.e.\n # if a linear-relu pattern is checked, we would check for subgraph_relationship\n # of the linear\n subgraph_relationship = _get_subgraph_relationship_type(\n cur_subgraph_a, cur_subgraph_b,\n gm_a, gm_b, type_a_related_to_b)\n if subgraph_relationship == SugraphTypeRelationship.NOT_RELATED:\n msg = f\"\"\"\n({cur_subgraph_a}, {type_start_a}) and\n({cur_subgraph_b}, {type_start_b}) are not related\"\"\"\n raise GraphMatchingException(msg)\n elif subgraph_relationship == SugraphTypeRelationship.EQUAL:\n # For now, skip nodes with equal types. In the future, this can\n # be made configurable.\n continue\n key_name_a = _get_name_for_subgraph(\n cur_subgraph_a, gm_a, base_name_to_sets_of_related_ops,\n existing_names_a)\n key_name_b = _get_name_for_subgraph(\n cur_subgraph_b, gm_b, base_name_to_sets_of_related_ops,\n existing_names_b)\n assert key_name_a == key_name_b, \\\n f\"Subgraph names {key_name_a} and {key_name_b} do not match\"\n results[key_name_a] = (cur_subgraph_a, cur_subgraph_b)\n continue\n elif cur_subgraph_a is None and cur_subgraph_b is None:\n # we reached the end of both graphs\n break\n else:\n # only one node was fetched, no match possible, throw error\n msg = f\"\"\"\nMatchable nodes count mismatch: ({cur_subgraph_a}, {type_start_a}) and\n({cur_subgraph_b}, {type_start_b})\"\"\"\n raise GraphMatchingException(msg)\n\n return results", "def match_allele(allele, trim_boundary=False):\n # sort the variants\n variants = sorted(allele[\"variants\"], key=lambda x: x[\"g_notation\"])\n \n # trim the first and last variants if requested\n if trim_boundary:\n if len(variants) > 0 and \"ins\" in variants[0][\"g_notation\"]:\n variants = variants[1:]\n \n if len(variants) > 0 and \"ins\" in variants[-1][\"g_notation\"]:\n variants = variants[:-1]\n \n # set of variants found in the allele\n found_variants = set(v[\"g_notation\"] for v in variants)\n \n # known variants - intersection of found variants with the set of all reference variants\n known_variants = found_variants & reference_variants\n \n # novel variants found in allele but not found in the reference set\n novel_variants = found_variants - reference_variants\n \n # significant variants found in allele\n significant_variants = {s.g_notation for s in locus.snps if s.tags is not None and \\\n \"significant\" in s.tags} & found_variants\n \n # tag the observed variants \n for v in variants:\n if v[\"g_notation\"] in known_variants:\n v[\"tags\"].append(\"known\")\n \n if v[\"g_notation\"] in novel_variants:\n v[\"tags\"].append(\"novel\")\n \n if v[\"g_notation\"] in significant_variants:\n v[\"tags\"].append(\"significant\")\n \n # update the allele record\n allele[\"variants\"] = variants\n\n # get all haplotype matches for this allele\n characterised_allele = characterise_allele(found_variants, significant_variants)\n \n # filter to remove matches with zero overlap\n filtered_allele = [x for x in characterised_allele if x[\"fraction\"] > 0]\n \n # sort by number of significant variants, fraction and jaccard\n # this gives a list of haplotypes ordered by decreasing 'score'\n sorted_allele = 
sorted(filtered_allele, key=lambda x: (x[\"fraction\"], len(x[\"significant\"]), x[\"jaccard\"]), reverse=True)\n \n # add the haplotypes section\n allele[\"haplotypes\"] = sorted_allele\n \n return allele", "def map_hydrogens(spc_1: ARCSpecies,\n spc_2: ARCSpecies,\n backbone_map: Dict[int, int],\n ) -> Dict[int, int]:\n atom_map = backbone_map\n atoms_1, atoms_2 = spc_1.mol.atoms, spc_2.mol.atoms\n for hydrogen_1 in atoms_1:\n if hydrogen_1.is_hydrogen() and atoms_1.index(hydrogen_1) not in atom_map.keys():\n success = False\n heavy_atom_1 = list(hydrogen_1.edges.keys())[0]\n heavy_atom_2 = atoms_2[backbone_map[atoms_1.index(heavy_atom_1)]]\n num_hydrogens_1 = len([atom for atom in heavy_atom_1.edges.keys() if atom.is_hydrogen()])\n if num_hydrogens_1 == 1:\n # We know that num_hydrogens_2 == 1 because the candidate map resulted from are_adj_elements_in_agreement().\n hydrogen_2 = [atom for atom in heavy_atom_2.edges.keys() if atom.is_hydrogen()][0]\n atom_map[atoms_1.index(hydrogen_1)] = atoms_2.index(hydrogen_2)\n success = True\n # Consider 2/3/4 hydrogen atoms on this heavy atom.\n # 1. Check for a heavy atom with only H atoms adjacent to it (CH4, NH3, H2).\n if not success:\n if all(atom.is_hydrogen() for atom in heavy_atom_1.edges.keys()):\n for atom_1, atom_2 in zip([atom for atom in atoms_1 if atom.is_hydrogen()],\n [atom for atom in atoms_2 if atom.is_hydrogen()]):\n atom_map[atoms_1.index(atom_1)] = atoms_2.index(atom_2)\n success = True\n # 2. Check for a torsion involving heavy_atom_1 as a pivotal atom (most common case).\n if not success:\n if spc_1.rotors_dict is not None:\n heavy_atom_1_index = atoms_1.index(heavy_atom_1)\n for rotor_dict in spc_1.rotors_dict.values():\n if heavy_atom_1_index in [rotor_dict['torsion'][1], rotor_dict['torsion'][2]]:\n atom_map = add_adjacent_hydrogen_atoms_to_map_based_on_a_specific_torsion(\n spc_1=spc_1,\n spc_2=spc_2,\n heavy_atom_1=heavy_atom_1,\n heavy_atom_2=heavy_atom_2,\n torsion=rotor_dict['torsion'],\n atom_map=atom_map,\n find_torsion_end_to_map=True,\n )\n success = True\n break\n # 3. Check for a pseudo-torsion (may involve multiple bonds) with heavy_atom_1 as a pivot.\n if not success:\n pseudo_torsion = list()\n for atom_1_3 in heavy_atom_1.edges.keys():\n if atom_1_3.is_non_hydrogen():\n for atom_1_4 in atom_1_3.edges.keys():\n if atom_1_4.is_non_hydrogen() and atom_1_4 is not heavy_atom_1:\n pseudo_torsion = [atoms_1.index(atom) for atom in [hydrogen_1, heavy_atom_1, atom_1_3, atom_1_4]]\n break\n if not len(pseudo_torsion):\n # Compromise for a hydrogen atom in position 4.\n for atom_1_4 in atom_1_3.edges.keys():\n if atom_1_4 is not heavy_atom_1:\n pseudo_torsion = [atoms_1.index(atom) for atom in [hydrogen_1, heavy_atom_1, atom_1_3, atom_1_4]]\n break\n if len(pseudo_torsion):\n atom_map = add_adjacent_hydrogen_atoms_to_map_based_on_a_specific_torsion(\n spc_1=spc_1,\n spc_2=spc_2,\n heavy_atom_1=heavy_atom_1,\n heavy_atom_2=heavy_atom_2,\n torsion=pseudo_torsion[::-1],\n atom_map=atom_map,\n find_torsion_end_to_map=False,\n )\n success = True\n break\n # 4. 
Check by angles and bond lengths (search for 2 consecutive heavy atoms).\n if not success:\n atom_1_3, angle_1, bond_length_1 = None, None, None\n for atom_1_3 in heavy_atom_1.edges.keys():\n if atom_1_3.is_non_hydrogen():\n heavy_atom_1_index, hydrogen_1_index = atoms_1.index(heavy_atom_1), atoms_1.index(hydrogen_1)\n angle_1 = calculate_angle(coords=spc_1.get_xyz(),\n atoms=[atoms_1.index(atom_1_3), heavy_atom_1_index, hydrogen_1_index])\n bond_length_1 = calculate_distance(coords=spc_1.get_xyz(),\n atoms=[heavy_atom_1_index, hydrogen_1_index])\n break\n if atom_1_3 is not None:\n atom_2_3_index = atom_map[atoms_1.index(atom_1_3)]\n angle_deviations, bond_length_deviations, hydrogen_indices_2 = list(), list(), list()\n for hydrogen_2 in heavy_atom_2.edges.keys():\n if hydrogen_2.is_hydrogen() and atoms_2.index(hydrogen_2) not in atom_map.values():\n heavy_atom_2_index, hydrogen_2_index = atoms_2.index(heavy_atom_2), atoms_2.index(hydrogen_2)\n angle_2 = calculate_angle(coords=spc_2.get_xyz(),\n atoms=[atom_2_3_index, heavy_atom_2_index, hydrogen_2_index])\n bond_length_2 = calculate_distance(coords=spc_2.get_xyz(),\n atoms=[heavy_atom_2_index, hydrogen_2_index])\n angle_deviations.append(abs(angle_1 - angle_2))\n bond_length_deviations.append(abs(bond_length_1 - bond_length_2))\n hydrogen_indices_2.append(hydrogen_2_index)\n deviations = [bond_length_deviations[i] * hydrogen_indices_2[i] for i in range(len(angle_deviations))]\n atom_map[atoms_1.index(hydrogen_1)] = hydrogen_indices_2[deviations.index(min(deviations))]\n return atom_map", "def test_single_two_read_two_snp_two_chrom(self):\n \n test_data = Data(read1_seqs = [\"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\",\n \"GGGGGGGGGGGGGGGGGGGGGGGGGGGGGG\"],\n read1_quals = [\"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\",\n \"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\"],\n genome_seqs = [\"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\\n\" +\n \"TTTTTTTTTTATTTTTTTTTTTTTTTTTTT\",\n \"GGGGGGGGGGGGGGGGGGGGGGGGGGGGGG\\n\" +\n \"CCCCCCCCCCGCCCCCCCCCCCCCCCCCCC\"],\n chrom_names = ['test_chrom1', 'test_chrom2'],\n snp_list = [['test_chrom1', 1, \"A\", \"C\"],\n ['test_chrom2', 3, \"G\", \"C\"]])\n \n test_data.setup()\n test_data.index_genome_bowtie2()\n test_data.map_single_bowtie2()\n test_data.sam2bam()\n\n find_intersecting_snps.main(test_data.bam_filename,\n snp_dir=test_data.snp_dir, is_paired_end=False,\n is_sorted=False)\n\n #\n # Verify new fastq is correct. The first base of the first read\n # should be switched from C to an A, and the third base of second read\n # should be switched from C to G\n #\n with gzip.open(test_data.fastq_remap_filename) as f:\n lines = [x.strip() for x in f.readlines()]\n assert len(lines) == 8\n\n l = list(test_data.read1_seqs[0])\n l[0] = 'C'\n new_seq = \"\".join(l)\n assert lines[1] == new_seq\n assert lines[3] == test_data.read1_quals[0]\n\n l = list(test_data.read1_seqs[1])\n l[2] = 'C'\n new_seq = \"\".join(l)\n\n assert lines[5] == new_seq\n assert lines[7] == test_data.read1_quals[1]\n \n #\n # Verify to.remap bam is the same as the input bam file.\n #\n old_lines = read_bam(test_data.bam_filename)\n new_lines = read_bam(test_data.bam_remap_filename)\n assert old_lines == new_lines\n\n #\n # Verify that the keep file is empty since only\n # read needs to be remapped. 
Note that the\n # read_bam still gives back one empty line.\n #\n lines = read_bam(test_data.bam_keep_filename)\n assert len(lines) == 1\n assert lines[0] == ''\n\n test_data.cleanup()", "def connect_regions(self):\n # Find all of the tiles that can connect two regions\n connector_regions = [[None for y in range(self.mz_height)]\n for x in range(self.mz_width)]\n\n for x in range(1, self.mz_width-1):\n for y in range(1, self.mz_height-1):\n if not self.tiles[x][y].blocked:\n continue\n\n # Count the number of different regions the wall tile is touching\n regions = set()\n for direction in self.DIRECTIONS:\n new_x = x + direction[0]\n new_y = y + direction[1]\n region = self._regions[new_x][new_y]\n if region is not None:\n regions.add(region)\n\n # Tile must connect to least two regions\n if len(regions) >= 2:\n connector_regions[x][y] = regions\n\n # Make a list of all the connectors\n connectors = set()\n for x in range(0, self.mz_width):\n for y in range(0, self.mz_height):\n if connector_regions[x][y]:\n connector_position = (x, y)\n connectors.add(connector_position)\n\n # Keep track of which regions have been merged. This maps an original\n # region index to the one it has been merged to.\n merged = {}\n open_regions = set()\n for i in range(self._current_region+1):\n merged[i] = i\n open_regions.add(i)\n\n # Connect the regions until one is left\n while len(open_regions) > 1:\n connector = rd.choice(tuple(connectors)) # Get random connector\n\n # Carve the connection\n self.add_junction(connector)\n\n # merge the connected regions\n x = connector[0]\n y = connector[1]\n\n # make a list of the regions at (x,y)\n regions = []\n for n in connector_regions[x][y]:\n # get the regions in the form of merged[n]\n actual_region = merged[n]\n regions.append(actual_region)\n\n dest = regions[0]\n sources = regions[1:]\n\n # Merge all of the affected regions. 
We have to look at ALL of the\n # regions because other regions may have been previously merged\n # with some of the ones we're merging now.\n for i in range(self._current_region+1):\n if merged[i] in sources:\n merged[i] = dest\n\n # Clear the sources, they are no longer needed\n for source in sources:\n open_regions.remove(source)\n\n # Remove the unneeded connectors\n connectors_to_remove = set()\n for pos in connectors:\n # Remove connectors that are next to the current connector\n if self.distance(connector, pos) < 2:\n connectors_to_remove.add(pos)\n continue\n\n # Check if the connector still spans different regions\n regions = set()\n x = pos[0]\n y = pos[1]\n for n in connector_regions[x][y]:\n actual_region = merged[n]\n regions.add(actual_region)\n if len(regions) > 1:\n continue\n\n # This connector isn't needed, but connect it occaisonally so\n # that the dungeon isn't singly-connected\n if rd.random() < self.extra_connector_chance:\n self.add_junction(pos)\n\n if len(regions) == 1:\n connectors_to_remove.add(pos)\n\n connectors.difference_update(connectors_to_remove)", "def test_paired_two_reads_two_snps(self):\n test_data = Data()\n\n read1_seqs = [\"AACGAAAAGGAGAA\",\n \"AAAAAAATTTAAAA\"]\n read2_seqs = [\"AAGAAACAACACAA\",\n \"AAAAATAAAAAATA\"]\n \n read1_quals = [\"B\" * len(read1_seqs[0]),\n \"C\" * len(read1_seqs[1])]\n read2_quals = [\"D\" * len(read2_seqs[0]),\n \"E\" * len(read2_seqs[1])]\n\n # 10 20 30\n # POS 123456789012345678901234567890\n # read1[0] AACGAAAAGGAGAA\n # read1[1] AAAAAAATTTAAAA\n # SNP ^\n genome_seq = [\"AAAAAACGAAAAGGAGAAAAAAATTTAAAA\\n\"\n \"TTTATTTTTTATTTTTTTGTGTTGTTTCTT\"]\n # read2[0] AACACAACAAAGAA\n # read2[1] ATAAAAAATAAAAA\n # SNP ^ ^ \n # POS 123456789012345678901234567890\n # 40 50\n \n snp_list = [['test_chrom', 18, \"A\", \"C\"],\n ['test_chrom', 39, \"T\", \"G\"],\n ['test_chrom', 51, \"G\", \"T\"]]\n \n test_data = Data(genome_seqs=genome_seq,\n read1_seqs=read1_seqs,\n read2_seqs=read2_seqs,\n read1_quals=read1_quals,\n read2_quals=read2_quals,\n snp_list=snp_list)\n \n test_data.setup()\n test_data.index_genome_bowtie2()\n test_data.map_paired_bowtie2()\n test_data.sam2bam()\n\n find_intersecting_snps.main(test_data.bam_filename,\n snp_dir=test_data.snp_dir, \n is_paired_end=True, is_sorted=False)\n\n expect_reads = set([(\"AACGAAAAGGAGAC\", \"AAGAAACAACACAA\"),\n (\"AACGAAAAGGAGAC\", \"AAGAAACAAAACAA\"),\n (\"AACGAAAAGGAGAA\", \"AAGAAACAAAACAA\"),\n (\"ACAAAAATTTAAAA\", \"AAAAATAAAAAATA\"),\n (\"ACAAAAATTTAAAA\", \"AAAAATACAAAATA\"),\n (\"AAAAAAATTTAAAA\", \"AAAAATACAAAATA\")])\n\n #\n # Verify fastq1 and fastq2 have appropriate read pairs\n #\n with gzip.open(test_data.fastq1_remap_filename) as f:\n lines1 = [x.strip() for x in f.readlines()]\n assert len(lines1) == len(expect_reads) * 4\n\n with gzip.open(test_data.fastq2_remap_filename) as f:\n lines2 = [x.strip() for x in f.readlines()]\n assert len(lines2) == len(expect_reads) * 4\n for i in range(1, len(lines2), 4):\n read_pair = (lines1[i], lines2[i])\n assert read_pair in expect_reads\n expect_reads.remove(read_pair)\n\n assert len(expect_reads) == 0\n\n #\n # Verify that the keep file is empty since only\n # read needs to be remapped. 
Note that the\n # read_bam still gives back one empty line.\n #\n lines = read_bam(test_data.bam_keep_filename)\n assert len(lines) == 1\n assert lines[0] == ''\n\n test_data.cleanup()\n\n # TODO: test when only one half of read maps", "def fix_dihedrals_by_backbone_mapping(spc_1: ARCSpecies,\n spc_2: ARCSpecies,\n backbone_map: Dict[int, int],\n ) -> Tuple[ARCSpecies, ARCSpecies]:\n if not spc_1.rotors_dict or not spc_2.rotors_dict:\n spc_1.determine_rotors()\n spc_2.determine_rotors()\n spc_1_copy, spc_2_copy = spc_1.copy(), spc_2.copy()\n torsions = get_backbone_dihedral_angles(spc_1, spc_2, backbone_map)\n deviations = [get_backbone_dihedral_deviation_score(spc_1, spc_2, backbone_map, torsions=torsions)]\n # Loop while the deviation improves by more than 1 degree:\n while len(torsions) and (len(deviations) < 2 or deviations[-2] - deviations[-1] > 1):\n for torsion_dict in torsions:\n angle = 0.5 * sum([torsion_dict['angle 1'], torsion_dict['angle 2']])\n spc_1_copy.set_dihedral(scan=convert_list_index_0_to_1(torsion_dict['torsion 1']),\n deg_abs=angle, count=False, chk_rotor_list=False, xyz=spc_1_copy.get_xyz())\n spc_2_copy.set_dihedral(scan=convert_list_index_0_to_1(torsion_dict['torsion 2']),\n deg_abs=angle, count=False, chk_rotor_list=False, xyz=spc_2_copy.get_xyz())\n spc_1_copy.final_xyz, spc_2_copy.final_xyz = spc_1_copy.initial_xyz, spc_2_copy.initial_xyz\n torsions = get_backbone_dihedral_angles(spc_1_copy, spc_2_copy, backbone_map)\n deviations.append(get_backbone_dihedral_deviation_score(spc_1_copy, spc_2_copy, backbone_map, torsions=torsions))\n return spc_1_copy, spc_2_copy", "def saliencyExchange(S,inst1, inst2, ratio):\n # darken the original salient instance\n box = inst1['bbox']\n for seg in inst1['segmentation']:\n poly = np.array(seg).reshape((-1,2)) \n swap_cols(poly,0,1)\n #print(\"origin salient \", poly.shape)\n polyPath = mplpath.Path(poly)\n darken(S, box, polyPath, ratio)\n # brighten the new salient instance \n box =inst2['bbox']\n for seg in inst2['segmentation']:\n poly = np.array(seg).reshape((-1,2)) \n swap_cols(poly,0,1)\n #print(\"new_salient\" , poly.shape)\n polyPath = mplpath.Path(poly)\n brighten(S, box,polyPath, ratio)", "def match_cat():\n\n #Force to match only to galaxies\n\n gals = np.where((cosmos_cat.z_phot > 0.001) & (cosmos_cat.z_phot < 9.9))[0]\n cosmos_skycoord = SkyCoord(ra = cosmos_cat.ra[gals] * u.deg, dec = cosmos_cat.dec[gals] * u.deg)\n\n hsc_skycoord = SkyCoord(ra = hsc_cat.ra * u.deg, dec = hsc_cat.dec * u.deg)\n \n cosmos_index, dist_2d, dist_3d = hsc_skycoord.match_to_catalog_sky(cosmos_skycoord)\n\n return gals[cosmos_index], dist_2d.to('arcsec').value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the neighbor ids of a given grain. This function finds the ids of the neighboring grains. A mask is constructed by dilating the grain to encompass the immediate neighborhood of the grain. The ids can then be determined using the numpy unique function.
def find_neighbors(self, grain_id, distance=1):
    if not hasattr(self, 'grain_map'):
        return []
    grain_data = self.grain_map == grain_id
    grain_data_dil = ndimage.binary_dilation(grain_data, iterations=distance).astype(np.uint8)
    neighbor_ids = np.unique(self.grain_map[grain_data_dil - grain_data == 1])
    return neighbor_ids.tolist()
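A minimal standalone sketch of the same neighbor lookup, on a small hypothetical 2D label map (the array values and the grain ids are invented purely for illustration; the imports mirror what the method above relies on):

import numpy as np
from scipy import ndimage

# hypothetical 2D grain map with two touching grains labelled 1 and 2
grain_map = np.zeros((5, 5), dtype=np.uint8)
grain_map[1:4, 1:3] = 1
grain_map[1:4, 3:5] = 2

# dilate grain 1 by one voxel and read the labels covered by the dilated shell
grain = grain_map == 1
grain_dil = ndimage.binary_dilation(grain, iterations=1).astype(np.uint8)
neighbor_ids = np.unique(grain_map[grain_dil - grain == 1])
print(neighbor_ids[neighbor_ids > 0].tolist())  # expected: [2]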
[ "def neighbor_indices(self):", "def get_poly_neighbor_ids(poly):\n\n neighbor_ids = set()\n\n for edge in poly.edges:\n neighbor_ids.update(edge.merged_edge.polygon_ids)\n\n neighbor_ids.remove(poly.id)\n\n return neighbor_ids", "def _localNonOverlappingCellIDs(self):\n return numerix.arange(self.mesh.overlap['front'] * self.mesh.nx * self.mesh.ny, \n (self.mesh.nz - self.mesh.overlap['back']) * self.mesh.nx * self.mesh.ny)", "def get_crop_indices_from_mask(mask):\n ys, xs = np.where(mask == 255)\n return min(ys), max(ys), min(xs), max(xs)", "def retrieveCellIds(self, listOfPoints, containedOnly=False):\n cellIds = []\n for cntb, bound in enumerate(listOfPoints):\n cellIds.append([])\n for point in bound:\n cellIds[cntb].extend(self.gridContainer['vertexToCellIds'][tuple(point)])\n if cntb == 0:\n previousSet = set(cellIds[cntb])\n if containedOnly:\n previousSet = set(previousSet).intersection(cellIds[cntb])\n else:\n previousSet.update(cellIds[cntb])\n\n return list(set(previousSet))", "def all_in_neighbors_(G,S):\n nbrs = set([])\n for nidx in S:\n nbrs.update(set(G.in_neighbors_(nidx)))\n return nbrs", "def edge_mask(mask):\n\n # Sagittal profile\n brain = mask.any(axis=0)\n\n # Simple edge detection\n edgemask = 4 * brain - np.roll(brain, 1, 0) - np.roll(brain, -1, 0) - \\\n np.roll(brain, 1, 1) - np.roll(brain, -1, 1) != 0\n return edgemask.astype('uint8')", "def find_vertices_cell_ids(surf, cell_ids):\n # Get faces of surface\n faces = surf.faces.reshape(-1,4)[:,1:4]\n return np.unique(faces[cell_ids,:].flatten())", "def all_in_neighbors(G,S):\n nbrs = set([])\n for nobj in S:\n nbrs.update(set(G.in_neighbors(nobj)))\n return nbrs", "def get_neighborhood(self, ids, radius=1, full_subgraph=True):\n\n\n verts = ids\n\n ## find the vertices within radius (and the path edges)\n for i in range(radius):\n edges_out = self.get_edges(src_ids=verts)\n edges_in = self.get_edges(dst_ids=verts)\n\n verts = list(edges_in['__src_id']) + list(edges_in['__dst_id']) + \\\n list(edges_out['__src_id']) + list(edges_out['__dst_id'])\n verts = list(set(verts))\n\n ## make a new graph to return and add the vertices\n g = SGraph()\n g = g.add_vertices(self.get_vertices(verts), vid_field='__id')\n\n ## add the requested edge set\n if full_subgraph is True:\n induced_edge_out = self.get_edges(src_ids=verts)\n induced_edge_in = self.get_edges(dst_ids=verts)\n df_induced = induced_edge_out.append(induced_edge_in)\n df_induced = df_induced.groupby(df_induced.column_names(), {})\n\n verts_sa = SArray(list(verts))\n edges = df_induced.filter_by(verts_sa, \"__src_id\")\n edges = edges.filter_by(verts_sa, \"__dst_id\")\n\n else:\n path_edges = edges_out.append(edges_in)\n edges = path_edges.groupby(path_edges.column_names(), {})\n\n g = g.add_edges(edges, src_field='__src_id', dst_field='__dst_id')\n return g", "def neighbors_xy(x, y, shape):\n return neighbors(mask_xy(x, y, shape))", "def in_neighbors(u, miia):\n result_set = []\n for path in miia:\n if u in path and path[0] != u:\n result_set.append(path[path.index(u) - 1])\n return result_set", "def __get_spatial_neighbors(self):\n row = self.gdf[self.gdf['shapeID'] == self.target_id].squeeze()\n target_neighbors = self.gdf[~self.gdf.geometry.disjoint(row.geometry)].shapeID.tolist()\n neighbors = target_neighbors\n\n all_neighbors = {}\n self.degree_dict[0] = [self.target_id]\n self.degree_dict[1] = [i for i in target_neighbors if i != self.target_id]\n \n # Get neighbors\n for i in range(self.degrees):\n new_n = []\n for n in neighbors:\n cur_row = 
self.gdf[self.gdf['shapeID'] == n].squeeze()\n cur_neighbors = self.gdf[~self.gdf.geometry.disjoint(cur_row.geometry)].shapeID.tolist()\n if n not in all_neighbors.keys():\n all_neighbors[n] = cur_neighbors\n new_n.append(n)\n if i != 0:\n self.degree_dict[i + 1] = new_n\n\n k = [v for k,v in all_neighbors.items()]\n k = list(set([item for sublist in k for item in sublist]))\n k = [i for i in k if i not in all_neighbors.keys()]\n neighbors = k\n\n if len(neighbors) == 0:\n break\n\n # Cleanup: remove all ofthe neighbors of neighbors that are more than one degree fromt he target node\n # i.i. remove all of the muiciaplites in the values that are not in the keys\n u_vals = list(set([item for sublist in all_neighbors.values() for item in sublist]))\n remove_vals = [i for i in u_vals if i not in all_neighbors.keys()]\n for k,v in all_neighbors.items():\n to_remove = [j for j in v if j in remove_vals]\n for tr in to_remove:\n all_neighbors[k] = [i for i in all_neighbors[k] if i not in tr]\n\n return all_neighbors", "def _generate_mask(self) -> ndarray:\n # calculate mean 3x3 (square nbhood) orography heights\n radius = number_of_grid_cells_to_distance(self.topography, 1)\n topo_nbhood = NeighbourhoodProcessing(\"square\", radius)(self.topography)\n topo_nbhood.convert_units(\"m\")\n\n # create mask\n mask = np.full(topo_nbhood.shape, False, dtype=bool)\n mask = np.where(topo_nbhood.data < self.orog_thresh_m, True, mask)\n mask = np.where(self.humidity.data < self.rh_thresh_ratio, True, mask)\n mask = np.where(abs(self.vgradz) < self.vgradz_thresh_ms, True, mask)\n return mask", "def find_land_squares_indices(self):\n return np.transpose(np.nonzero(self.landscape))", "def linker(cls, state):\n edges_index = cls.linker_neighbors(state)\n value2value = state.reshape(-1, 3)[edges_index]\n mask = torch.all(value2value[0] == elem.WALL, -1) & torch.all(\n value2value[1] == elem.WALL, -1\n )\n edges_index = edges_index[:, ~mask]\n return edges_index", "def unknown_neighbors_of_8(mapdata, x, y, visited):\n\n # CREATES AN EMPTY OUTPUT LIST\n unknown_neighbors = []\n # LISTS ALL NEIGHBORS\n xa = int(x - 1)\n xb = int(x + 1)\n ya = int(y - 1)\n yb = int(y + 1)\n neighbors = [(x, yb), (xa, yb), (xa, y), (xa, ya), (x, ya), (xb, ya), (xb, y), (xb, yb)]\n # ITERATES THROUGH LIST OF NEIGHBORS\n for coord in neighbors:\n # ADDS UNKNOWN NEIGHBORS TO THE OUTPUT LIST\n unknown_index = FindFrontier.grid_to_index(mapdata, coord[0], coord[1])\n if mapdata.data[unknown_index] == -1 and unknown_index not in visited:\n unknown_neighbors.append(coord)\n # RETURNS OUTPUT LIST\n return unknown_neighbors", "def retrieveCellIds(cls, listOfPoints):", "def get_neighborhood_information(\n source_id,\n overwrite=0,\n min_n_nbhrs=1000,\n manual_gmag_limit=None\n ):\n\n #\n # Get the targetname\n #\n ticid = gaiadr2_to_tic(str(source_id))\n toiid = ticid_to_toiid(ticid)\n\n if isinstance(toiid, str):\n targetname = toiid\n else:\n targetname = 'TIC{}.01'.format(ticid)\n\n #\n # Get Gaia information for target.\n #\n enforce_all_sourceids_viable = True\n savstr = '_nbhdonly'\n\n target_d = objectid_search(\n source_id,\n columns=('source_id', 'ra','dec', 'ra_error', 'dec_error',\n 'phot_g_mean_mag', 'phot_bp_mean_mag', 'phot_rp_mean_mag',\n 'l','b', 'parallax, parallax_error', 'pmra','pmra_error',\n 'pmdec','pmdec_error', 'radial_velocity'),\n forcefetch=True,\n gaia_mirror='vizier'\n )\n target_df = pd.read_csv(target_d['result'])\n assert len(target_df) == 1\n\n # now acquire the mean properties of the group, and query the 
neighborhood\n # based on those properties. the number of neighbor stars to randomly\n # select is min(5* the number of group members, 5000). (cutoff group\n # bounds based on parallax because further groups more uncertain).\n bounds = {}\n params = ['parallax', 'ra', 'dec']\n\n plx_mean = float(target_df.parallax)\n\n n_nbhrs = 0\n n_std = 5\n n_std_incr = 10\n n_std_max = 200\n\n if plx_mean > 10:\n n_std = 5\n n_std_incr = 20\n n_std_max = 1000\n\n while n_nbhrs < min_n_nbhrs:\n\n if n_std > n_std_max:\n return None\n\n LOGINFO('trying when bounding by {} stdevns'.format(n_std))\n\n for param in params:\n mult = 1 if 'parallax' in param else 2\n bounds[param+'_upper'] = (\n float(target_df[param]) + mult*n_std*float(target_df[param + '_error'])\n )\n bounds[param+'_lower'] = (\n float(target_df[param]) - mult*n_std*float(target_df[param + '_error'])\n )\n\n if bounds['parallax_lower'] < 0:\n bounds['parallax_lower'] = 0\n if bounds['ra_upper'] > 360:\n bounds['ra_upper'] = 359.999\n if bounds['ra_lower'] < 0:\n bounds['ra_lower'] = 0\n if bounds['dec_upper'] > 90:\n bounds['dec_upper'] = 89.999\n if bounds['dec_lower'] < -90:\n bounds['dec_lower'] = -89.999\n\n n_max = int(1e4)\n\n if manual_gmag_limit is None:\n manual_gmag_limit = 17\n\n groupname = '{}'.format(source_id)\n # only force overwrite if iterating\n if n_nbhrs == 0:\n nbhd_df = query_neighborhood(bounds, groupname, n_max=n_max,\n overwrite=overwrite,\n manual_gmag_limit=manual_gmag_limit)\n else:\n nbhd_df = query_neighborhood(bounds, groupname, n_max=n_max,\n overwrite=True,\n manual_gmag_limit=manual_gmag_limit)\n\n n_nbhrs = len(nbhd_df)\n LOGINFO(42*'=')\n LOGINFO('Got {} neighborhods, when minimum was {}'.\n format(n_nbhrs, min_n_nbhrs))\n LOGINFO(42*'=')\n\n n_std += n_std_incr\n\n n_std = 3\n pmdec_min = np.nanmean(nbhd_df['pmdec']) - n_std*np.nanstd(nbhd_df['pmdec'])\n pmdec_max = np.nanmean(nbhd_df['pmdec']) + n_std*np.nanstd(nbhd_df['pmdec'])\n pmra_min = np.nanmean(nbhd_df['pmra']) - n_std*np.nanstd(nbhd_df['pmra'])\n pmra_max = np.nanmean(nbhd_df['pmra']) + n_std*np.nanstd(nbhd_df['pmra'])\n\n pmdec_min = min((pmdec_min, float(target_df['pmdec'])))\n pmdec_max = max((pmdec_max, float(target_df['pmdec'])))\n pmra_min = min((pmra_min, float(target_df['pmra'])))\n pmra_max = max((pmra_max, float(target_df['pmra'])))\n\n return (targetname, groupname, target_df, nbhd_df,\n pmdec_min, pmdec_max, pmra_min, pmra_max)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dilate a single grain overwriting the neighbors.
def dilate_grain(self, grain_id, dilation_steps=1, use_mask=False):
    grain_volume_init = (self.grain_map == grain_id).sum()
    grain_data = self.grain_map == grain_id
    grain_data = ndimage.binary_dilation(grain_data, iterations=dilation_steps).astype(np.uint8)
    if use_mask and hasattr(self, 'mask'):
        grain_data *= self.mask.astype(np.uint8)
    self.grain_map[grain_data == 1] = grain_id
    grain_volume_final = (self.grain_map == grain_id).sum()
    print('grain %s was dilated by %d voxels' % (grain_id, grain_volume_final - grain_volume_init))
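To illustrate the overwrite behaviour, here is a rough standalone equivalent on a tiny hypothetical label image (no Microstructure object involved; the labels and mask are made up for the example):

import numpy as np
from scipy import ndimage

# hypothetical label image: grain 1 on the left, grain 2 in the last column
grain_map = np.array([[0, 1, 1, 0, 2],
                      [0, 1, 1, 0, 2],
                      [0, 0, 0, 0, 2]], dtype=np.int16)
mask = np.ones_like(grain_map, dtype=np.uint8)
mask[:, 0] = 0  # pretend the first column lies outside the sample

volume_init = int((grain_map == 1).sum())
grown = ndimage.binary_dilation(grain_map == 1, iterations=1).astype(np.uint8)
grown *= mask  # restrict the growth to the sample mask
grain_map[grown == 1] = 1  # overwrite whatever the dilated shell reaches
volume_final = int((grain_map == 1).sum())
print('grain 1 was dilated by %d voxels' % (volume_final - volume_init))  # expected: 4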
[ "def dilate_grains(self, dilation_steps=1, dilation_ids=None):\n if not hasattr(self, 'grain_map'):\n raise ValueError('microstructure %s must have an associated grain_map attribute' % self.name)\n return\n\n grain_map = self.grain_map.copy()\n # get rid of overlap regions flaged by -1\n grain_map[grain_map == -1] = 0\n\n if hasattr(self, 'mask'):\n grain_map = Microstructure.dilate_labels(grain_map, dilation_steps=dilation_steps, mask=self.mask, dilation_ids=dilation_ids)\n else:\n grain_map = Microstructure.dilate_labels(grain_map, dilation_steps=dilation_steps, dilation_ids=dilation_ids)\n # finally assign the dilated grain map to the microstructure\n self.grain_map = grain_map", "def update_gradient(self):\n \n #Seed robots need no further update.\n if self.seed:\n return\n \n #Gradient distance\n G = 3*self.radius\n \n #Only consider neighbors closer than G\n grad_vals = [s[2] for s in self.world.scan(self.ID) if s[0]<G]\n \n if grad_vals:\n self.grad_val = min(grad_vals)+1+self.rand_nonce", "def unshard_grad(self):\n if not self.uses_sharded_strategy:\n self._use_unsharded_grad_views()\n return\n flat_param = self.flat_param\n self._check_unsharded(flat_param)\n\n # Check if all ranks have a `None` gradient\n num_grad_none = torch.zeros(1, dtype=torch.int32, device=self.device)\n num_grad_none[0] = flat_param.grad is None\n dist.all_reduce(num_grad_none, group=self.process_group)\n if num_grad_none[0] == self.world_size:\n flat_param._saved_grad_shard = None # type: ignore[assignment]\n self._use_unsharded_grad_views()\n return\n\n padded_unsharded_grad = torch.empty(\n flat_param._padded_unsharded_size, # type: ignore[attr-defined]\n device=self.device,\n )\n if flat_param.grad is None:\n # In the case that only some ranks have `None` gradient, we use\n # zeros to approximate as a best effort attempt\n if self._debug_level == dist.DebugLevel.INFO:\n warnings.warn(\n f\"[Rank {self.rank}] Only some but not all ranks have a \"\n \"`None` `FlatParameter` gradient, so FSDP is using zeros to \"\n \"approximate those ranks' sharded gradients being `None`\"\n )\n flat_param._saved_grad_shard = None # type: ignore[assignment]\n sharded_grad = torch.zeros(flat_param._sharded_size, device=self.device) # type: ignore[attr-defined]\n else:\n self._check_sharded(flat_param.grad)\n flat_param._saved_grad_shard = flat_param.grad # type: ignore[attr-defined]\n sharded_grad = flat_param._saved_grad_shard # type: ignore[attr-defined]\n dist.all_gather_into_tensor(\n padded_unsharded_grad, sharded_grad, self.process_group\n )\n unsharded_size = self.flat_param._unpadded_unsharded_size\n flat_param.grad = padded_unsharded_grad[: unsharded_size.numel()].view(\n unsharded_size\n )\n self._use_unsharded_grad_views()", "def regenerate_skylight(self):\n\n lightmap = zeros((16, 16, 128), dtype=uint8)\n\n for x, z in product(xrange(16), repeat=2):\n # The maximum lighting value, unsurprisingly, is 0xf, which is the\n # biggest possible value for a nibble.\n light = 0xf\n\n # Apparently, skylights start at the block *above* the block on\n # which the light is incident?\n height = self.heightmap[x, z] + 1\n\n # The topmost block, regardless of type, is set to maximum\n # lighting, as are all the blocks above it.\n lightmap[x, z, height:] = light\n\n # Dim the light going throught the remaining blocks, until there\n # is no more light left.\n for y in range(height, -1, -1):\n dim = blocks[self.blocks[x, z, y]].dim\n light -= dim\n if light <= 0:\n break\n\n lightmap[x, z, y] = light\n\n # Now it's time to 
spread the light around. This flavor uses extra\n # memory to speed things up; the basic idea is to spread *all* light,\n # one glow level at a time, rather than spread each block\n # individually.\n max_height = amax(self.heightmap)\n lightable = vectorize(lambda block: blocks[block].dim < 15)(self.blocks)\n # Protip: This is a bitwise AND because logical ANDs on arrays can't\n # happen in Numpy.\n unlighted = logical_not(lightmap) & lightable\n\n # Create a mask to find all blocks that have an unlighted block\n # as a neighbour in the xz-plane.\n mask = zeros((16, 16, max_height), dtype=bool)\n mask[:-1,:,:max_height] |= unlighted[1:, :, :max_height]\n mask[:,:-1,:max_height] |= unlighted[:, 1:, :max_height]\n mask[1:,:,:max_height] |= unlighted[:-1, :, :max_height]\n mask[:,1:,:max_height] |= unlighted[:, :-1, :max_height]\n\n # Apply the mask to the lightmap to find all lighted blocks with one\n # or more unlighted blocks as neighbours.\n edges = logical_and(mask, lightmap[:, :, :max_height]).nonzero()\n\n spread = [tuple(coords) for coords in transpose(edges)]\n visited = set()\n\n # Run the actual glow loop. For each glow level, go over unvisited air\n # blocks and illuminate them.\n for glow in range(14, 0, -1):\n for coords in spread:\n if lightmap[coords] <= glow:\n visited.add(coords)\n continue\n\n for dx, dz, dy in (\n (1, 0, 0),\n (-1, 0, 0),\n (0, 1, 0),\n (0, -1, 0),\n (0, 0, 1),\n (0, 0, -1)):\n x, z, y = coords\n x += dx\n z += dz\n y += dy\n\n if not (0 <= x < 16 and\n 0 <= z < 16 and\n 0 <= y < 128):\n continue\n\n if (x, z, y) in visited:\n continue\n\n if lightable[x, z, y] and lightmap[x, z, y] < glow:\n lightmap[x, z, y] = glow - blocks[self.blocks[x, z, y]].dim\n visited.add((x, z, y))\n spread = visited\n visited = set()\n\n self.skylight = lightmap.clip(0, 15)", "def create_garden(self):\n\n counter = 0\n while self.garden_area < self.garden_area_target:\n zero = nullgard = zone = zapped = clip = ring_suitability = new_cells = None\n # self.wipe_locks()\n counter += 1\n if s.DEBUG_MODE:\n logging.info('counter: %s' % counter)\n logging.info('garden area pre create_garden: %s' % self.garden_area)\n\n # Set nodata values in garden grid to 0\n zero = arcpy.sa.Con(arcpy.sa.IsNull(self.garden) == 1, 0, self.garden)\n # if s.DEBUG_MODE:\n zero.save(os.path.join(s.TEMP_DIR, \"zero_%s.tif\" % counter))\n\n # Create another grid where current garden is NODATA and all other values = 0\n nullgard = arcpy.sa.SetNull(zero == s.GARDEN_ID, 0)\n # if s.DEBUG_MODE:\n nullgard.save(os.path.join(s.TEMP_DIR, \"nullgard_%s.tif\" % counter))\n\n # Expand potential garden grid by one cell\n zone = arcpy.sa.Expand(self.garden, 1, s.GARDEN_ID)\n # if s.DEBUG_MODE:\n zone.save(os.path.join(s.TEMP_DIR, \"zone_%s.tif\" % counter))\n\n # Create a clipping raster for gardens\n zapped = arcpy.sa.Plus(nullgard, self.local_suitability)\n # if s.DEBUG_MODE:\n zapped.save(os.path.join(s.TEMP_DIR, \"zapped_%s.tif\" % counter))\n\n # Clip expanded garden grid by removing unsuitable areas and places where garden currently exists\n clip = arcpy.sa.ExtractByMask(zone, zapped)\n array = arcpy.RasterToNumPyArray(clip)\n unique_values = numpy.unique(array, return_counts=True)\n value_dict = dict(zip(unique_values[0], unique_values[1]))\n if s.GARDEN_ID not in value_dict.keys():\n logging.info('no new cells can be added')\n break\n\n # if s.DEBUG_MODE:\n clip.save(os.path.join(s.TEMP_DIR, 'clip_%s.tif' % counter))\n\n ring_suitability = arcpy.sa.Con(clip, self.local_suitability)\n # if s.DEBUG_MODE:\n 
ring_suitability.save(os.path.join(s.TEMP_DIR, 'ring_suitability_%s.tif' % counter))\n\n new_cells = arcpy.sa.Con(ring_suitability == ring_suitability.maximum, s.GARDEN_ID)\n # if s.DEBUG_MODE:\n new_cells.save(os.path.join(s.TEMP_DIR, 'new_cells_%s.tif' % counter))\n\n new_cells_area = self.get_garden_area(new_cells)\n\n if (new_cells_area + self.garden_area) <= self.garden_area_target:\n self.garden = arcpy.sa.Con(zero == s.GARDEN_ID, s.GARDEN_ID,\n arcpy.sa.Con(new_cells == s.GARDEN_ID, s.GARDEN_ID, self.garden))\n\n else:\n random_cells = arcpy.sa.Con(new_cells, self.randrast)\n array = arcpy.RasterToNumPyArray(random_cells)\n random_values = numpy.unique(array).tolist()\n random.shuffle(random_values)\n\n while self.garden_area < self.garden_area_target:\n r = random_values.pop()\n new_cell = arcpy.sa.Con(random_cells == r, s.GARDEN_ID)\n\n self.garden = arcpy.sa.Con(arcpy.sa.IsNull(new_cell) == 0, new_cell, self.garden)\n\n self.garden_area = self.get_garden_area(self.garden)\n # if s.DEBUG_MODE:\n new_cell.save(os.path.join(s.TEMP_DIR, 'new_cell_%s.tif' % counter))\n\n self.garden_area = self.get_garden_area(self.garden)\n del zero, zone, zapped, nullgard, clip, ring_suitability, new_cells\n # utils.clear_dir_by_pattern(s.TEMP_DIR, '.cpg')\n # utils.clear_dir_by_pattern(self.OUTPUT_DIR, '.cpg')\n if s.DEBUG_MODE:\n logging.info('finished create_garden {}'.format(counter))\n\n path_garden = os.path.join(self.OUTPUT_DIR, 'garden_{}.tif'.format(self.year))\n self.garden.save(path_garden)\n logging.info('finished create_garden: {}'.format(self.garden))", "def remove_dipole(self, diprip):\r\n\r\n self.dipoles = np.delete(self.dipoles, diprip)\r\n self.n_dips -= 1", "def randomize(self):\n for i in range(lang.gbs_builtins.NUM_COLORS):\n self.stones[i] = self.stones.get(i, 0) + stone_dist()", "def consumeGrain(self):\r\n # Consume grain for all workers\r\n self.model.totalGrain -= self.workers * 160\r\n self.grain -= self.workers * 160 \r\n \r\n # Decrement amount of workers if grain is less than or equal to zero (also impacts overall population numbers)\r\n if (self.grain <= 0):\r\n self.model.totalGrain -= self.grain # Add back negative grain to prevent negatve grain in model and incorrect grain representation\r\n self.grain = 0\r\n self.workers -= 1\r\n self.settlement.population -= 1\r\n self.model.totalPopulation -= 1\r\n\r\n # Check if there are still workers in the Household\r\n if self.workers <= 0:\r\n # Removes ownership of all fields\r\n for f in self.fields:\r\n f.owned = False\r\n # Decrements the amount of households and removes this household from the simulation\r\n self.settlement.noHouseholds -= 1\r\n self.model.schedule.remove(self)", "def add_noise(self,lam=self.lam):\n self.noise = np.random.poisson(lam=lam, size=self.cluster.shape)\n self.image += self.noise\n return", "def add_seed(self, x, y):\n seed = self.get_absolute_pos(x, y)\n if self.regions.add_seed(seed):\n mask = self.flood_fill(x, y)\n self.regions.update_mask(mask)", "def diffuse(self):\n transmission_coeff = 0.3\n # allow the grid to cool down\n sink_coeff = 0.1\n for idx, cell in self.grid.cells():\n # how much total heat the cell radiates\n emission_loss = cell.heat * transmission_coeff\n neighbors = self.grid.neighbors(idx)\n for nidx,n in neighbors:\n # Only colder cells (positive delta) will absorb the heat.\n # Sum of transmissions cannot be greater that the total emission.\n delta = cell.heat - n.heat\n n.heat += emission_loss / len(neighbors)\n cell.heat -= emission_loss + (cell.heat * 
sink_coeff)", "def copy(self):\n return GrainBoundary(\n self.grain_0.copy(),\n self.grain_1.copy(),\n None,\n None,\n None,\n self.average_lattice,\n self.translation_vec.copy(),\n self.vacuum,\n self.merge_tol,\n self.reconstruction,\n )", "def spread_disease(self):\n # Store the current state of disease for convenience of simultaneity.\n disease = dict(self.disease)\n\n # Update self.disease at all locations.\n for loc in self.locations:\n # Growth\n self.disease[loc] += disease[loc] * self.growth\n # Spreading\n if disease[loc] >= self.threshold:\n for neighbor in self.conn[loc]:\n self.disease[neighbor] += disease[loc] * self.spread\n\n # Fix a detail: disease will not grow at or spread to the location of the agent.\n self.disease[self.location] = disease[self.location]", "def drunk_pirate(self, pirate):\n pirate.owner.drunk_pirates.append(pirate)\n pirate.drink_turns.append(self.turn + 1)\n pirate.turns_to_sober = self.turns_to_sober", "def storageLoss(self):\r\n self.model.totalGrain -= round(self.grain * 0.1) # Prevent grain going to a float because unrestricted types\r\n self.grain -= round(self.grain*0.1)", "def kill_sensor(self):\n self.sensor.set_light_color(17)", "def uncover_neighbors(self, row, column):\n super().uncover_neighbors(row, column)\n\n for row_offset, column_offset in product((-1, 0, 1), (-1, 0, 1)):\n try:\n if self.cells[row + row_offset][column + column_offset].state == \"uncovered\" and \\\n row + row_offset >= 0 and column + column_offset >= 0 and \\\n self.neighboring_bombs(row + row_offset, column + column_offset) - \\\n self.neighboring_flags(row + row_offset, column + column_offset) >= 0 and \\\n self.neighboring_uncovered(row + row_offset, column + column_offset) > 0 and \\\n not self.cells[row + row_offset][column + column_offset] in self.list_active_cells():\n self.insert_active_cell(self.cells[row + row_offset][column + column_offset])\n except (TypeError, IndexError):\n pass", "def balanceWhite(self, src, dst=...) -> dst:\n ...", "def lstm_forget_gate_init_(cell: nn.RNNBase, value: float = 1.0) -> None:\n with torch.no_grad():\n for _, _, ih_b, hh_b in cell.all_weights:\n length = len(ih_b)\n ih_b.data[length // 4:length // 2].fill_(value)\n hh_b.data[length // 4:length // 2].fill_(value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dilate labels isotropically to fill the gap between them. This code is based on the gtDilateGrains function from the DCT code. It has been extended to handle both 2D and 3D cases.
def dilate_labels(array, dilation_steps=1, mask=None, dilation_ids=None, struct=None):
    from scipy import ndimage
    if struct is None:
        struct = ndimage.morphology.generate_binary_structure(array.ndim, 1)
    assert struct.ndim == array.ndim
    # carry out dilation in iterative steps
    for step in range(dilation_steps):
        if dilation_ids:
            grains = np.isin(array, dilation_ids)
        else:
            grains = (array > 0).astype(np.uint8)
        grains_dil = ndimage.morphology.binary_dilation(grains, structure=struct).astype(np.uint8)
        if mask is not None:
            # only dilate within the mask
            grains_dil *= mask.astype(np.uint8)
        todo = (grains_dil - grains)
        # get the list of voxel for this dilation step
        if array.ndim == 2:
            X, Y = np.where(todo)
        else:
            X, Y, Z = np.where(todo)

        xstart = X - 1
        xend = X + 1
        ystart = Y - 1
        yend = Y + 1

        # check bounds
        xstart[xstart < 0] = 0
        ystart[ystart < 0] = 0
        xend[xend > array.shape[0] - 1] = array.shape[0] - 1
        yend[yend > array.shape[1] - 1] = array.shape[1] - 1
        if array.ndim == 3:
            zstart = Z - 1
            zend = Z + 1
            zstart[zstart < 0] = 0
            zend[zend > array.shape[2] - 1] = array.shape[2] - 1

        dilation = np.zeros_like(X).astype(np.int16)
        print('%d voxels to replace' % len(X))
        for i in range(len(X)):
            if array.ndim == 2:
                neighbours = array[xstart[i]:xend[i] + 1, ystart[i]:yend[i] + 1]
            else:
                neighbours = array[xstart[i]:xend[i] + 1, ystart[i]:yend[i] + 1, zstart[i]:zend[i] + 1]
            if np.any(neighbours):
                # at least one neighboring voxel in non zero
                dilation[i] = min(neighbours[neighbours > 0])
        if array.ndim == 2:
            array[X, Y] = dilation
        else:
            array[X, Y, Z] = dilation
        print('dilation step %d done' % (step + 1))
    return array
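Assuming the `dilate_labels` function above is available in scope (together with numpy imported as `np`), a two-label toy case shows the gap-filling rule: each empty voxel uncovered by the dilation is assigned the smallest non-zero label found in its immediate neighbourhood. The array below is invented for the example.

import numpy as np

# two labels separated by a one-voxel gap
labels = np.array([[1, 1, 0, 2, 2],
                   [1, 1, 0, 2, 2]], dtype=np.int16)

filled = dilate_labels(labels.copy(), dilation_steps=1)
print(filled)
# the gap column touches both grains, so it is claimed by the smallest
# non-zero neighbouring label, i.e. 1:
# [[1 1 1 2 2]
#  [1 1 1 2 2]]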
[ "def augmentation(dataset, labels):\n\n print(\"Augmentation\")\n\n # if necessary create aug dir and make sure it's empty\n if not os.path.exists(config.aug_dir):\n os.makedirs(config.aug_dir)\n else:\n os.system('rm -rf %s/*' % config.aug_dir)\n\n # sort ids based on category\n split_categories = {0: [], 1: []}\n for id in dataset:\n split_categories[labels[id]].append(id)\n\n # calculate the amount of missing images to be augmented\n missing = {0: max(0, config.class_total - len(split_categories[0])), 1: max(0, config.class_total - len(split_categories[1]))}\n print(\" missing \" + config.class0 + \" data: \", missing[0])\n print(\" missing \" + config.class1 + \" data: \", missing[1])\n\n cnt = 0\n\n # loop over categories\n for cat in split_categories:\n\n # loop over missing repetitions of whole dataset\n for rep_idx in range(math.floor(missing[cat] / len(split_categories[cat]))):\n\n # loop over ids in dataset\n for id in split_categories[cat]:\n\n aug_name = \"aug\" + str(cnt) + \"_\" + id\n\n # update labels + dataset\n labels[aug_name] = cat\n dataset = np.append(dataset, aug_name)\n\n # augment image + save\n aug_image = mixing(id, split_categories[cat])\n np.save(config.aug_dir + aug_name + \".npy\", aug_image)\n\n cnt += 1\n\n # loop over rest of the missing images\n for rest_idx in range(missing[cat] % len(split_categories[cat])):\n\n id = split_categories[cat][rest_idx]\n aug_name = \"aug\" + str(cnt) + \"_\" + id\n\n # update labels + dataset\n labels[aug_name] = cat\n dataset = np.append(dataset, aug_name)\n\n # augment image + save\n aug_image = mixing(id, split_categories[cat])\n np.save(config.aug_dir + aug_name + \".npy\", aug_image)\n\n cnt += 1\n\n return dataset, labels", "def trimLabel(labels, minSize=100, verbose = False):\n\t\n\t\n\tfor obj in np.unique(labels):\n\t\tif obj == 0: # Object 0 is the background\n\t\t\tcontinue\n\t\ty, x = np.where(labels == obj)\n\t\tsize = len(x)*len(y)\n\t\tif size < minSize:\n\t\t\tif verbose:\n\t\t\t\tprint '\\tObject %d was to small (%d < %d), ignoring' % (obj, \\\n\t\t\t\t\tsize, minSize)\n\t\t\tlabels[y,x] = 0\n\t\n\t# Return trimmed labels\n\tlabels[np.where(labels != 0)] = 1\n\t\n\treturn labels", "def _preprocess_labels(labels):\n BAD_LABEL = 999\n # iterate through each label\n for i, label in enumerate(labels):\n for j, l in enumerate(label):\n if l == BAD_LABEL:\n labels[i,j] = 0\n return labels", "def custom_pad(batch_observations):\n seqs = [x[0].sentence for x in batch_observations]\n lengths = torch.tensor([len(x) for x in seqs], device=\"cpu\")\n label_shape = batch_observations[0][1].shape\n maxlen = int(max(lengths))\n label_maxshape = [maxlen for x in label_shape]\n labels = [-torch.ones(*label_maxshape, device=\"cpu\") for x in seqs]\n for index, x in enumerate(batch_observations):\n length = x[1].shape[0]\n if len(label_shape) == 1:\n labels[index][:length] = x[1]\n elif len(label_shape) == 2:\n labels[index][:length, :length] = x[1]\n else:\n raise ValueError(\"Labels must be either 1D or 2D right now; got either 0D or >3D\")\n labels = torch.stack(labels)\n return seqs, labels, lengths, batch_observations", "def remove_empty_slices(img, label, ratio=0):\n L = []\n for i in range(img.shape[0]):\n if np.count_nonzero(label[i]) == 0:\n L.append(i)\n \n L = np.array(L)\n np.random.shuffle(L)\n L = L[:int(ratio*L.shape[0])].tolist()\n\n print(\"Before:\", img.shape, end=' After: ')\n img = np.delete(img, L, axis=0)\n label = np.delete(label, L, axis=0) \n print(img.shape)\n \n return img, label", "def 
dilate(img: np.ndarray = None, kernel: np.ndarray = None) -> np.ndarray:\n # KERNEL\n h, w = img.shape[:2]\n res = np.full(img.shape, fill_value=255, dtype=np.uint8)\n kh, kw = kernel.shape[:2]\n\n ky, kx = (kh-1) // 2, (kw-1) // 2\n print(\"kh = {}\".format(kh))\n print(\"kw = {}\".format(kw))\n print(\"ky = {}\".format(ky))\n print(\"kw = {}\".format(kx))\n #grid = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n\n ########################## YOUR CODE HERE ######################\n \n #blur=cv.blur(img,(5,5))\n res = cv.dilate(img, kernel, iterations=1)\n ########################### TODO ###############################\n ### HINT: USE `cv.add()` ###\n #res=cv.add(np.zeros(img.shape, dtype=np.uint8),res)\n ### HINT: EROSION IS MIN OPERATION ###\n pass\n\n ################################################################\n\n return res", "def filter_labels(Labels, initial_distance):\n Filtered_labels = [0]\n distance = initial_distance\n while len(unique(Filtered_labels)) != len(unique(Labels)):\n count_near_day = {}\n for d in range(len(Labels)):\n count_near_day[d] = {}\n nearby = get_nearby(d - distance, d + distance, Labels)\n for p in unique(nearby):\n c = nearby.count(p)\n count_near_day[d][p] = c\n \n Filtered_labels = np.zeros(len(Labels))\n for d in range(len(Labels)):\n index = np.argmax(list(count_near_day[d].values()))\n Filtered_labels[d] = list(count_near_day[d].keys())[index]\n \n distance -= 1\n\n return Filtered_labels", "def tian_dense_labels(Y, n_rows):\n n_slices, n_boundaries, n_cols = Y.shape\n assert(n_boundaries == 9)\n\n # make sure values are all integers\n Y = np.round(Y)\n\n Y_dense = TIAN_FILL_VALUE_ABOVE * np.ones((n_slices, n_rows, n_cols), dtype=np.int32)\n\n for s in range(n_slices):\n for col in range(n_cols):\n region_1_2 = np.arange(Y[s,0,col], Y[s,1,col]).astype(np.int32)\n Y_dense[s,region_1_2,col] = 1\n \n region_2_4 = np.arange(Y[s,1,col], Y[s,2,col]).astype(np.int32)\n Y_dense[s,region_2_4,col] = 2\n \n region_4_6 = np.arange(Y[s,2,col], Y[s,4,col]).astype(np.int32)\n Y_dense[s,region_4_6,col] = 3\n \n region_6_11 = np.arange(Y[s,4,col], Y[s,7,col]).astype(np.int32)\n Y_dense[s,region_6_11,col] = 4\n\n region_rest = np.arange(Y[s,7,col], n_rows).astype(np.int32)\n Y_dense[s,region_rest,col] = TIAN_FILL_VALUE_BELOW\n\n return Y_dense", "def update_labels(self):\n dims = [n for n in range(self.__dimension.get_dim_size())]\n for i in range(self.__uncertain_bits_num):\n index = ToolFunction.sample_uniform_integer(0, self.__dimension.get_dim_size() - i - 1)\n self.__labels[dims[index]] = False\n dims.remove(dims[index])", "def detections2labels(detections, transformation = None, projection = None, imgShape = None):\n # initialize list with labels\n labels = []\n for i in range(detections.shape[0]):\n Detection = detections[i]\n label = {\n 'category': Detection['category'].decode('ascii'),\n 'box3D': {\n 'location': {'x': Detection['position']['x'], 'y': Detection['position']['y'], 'z': Detection['position']['z']},\n 'dimensions': {'height': Detection['height'], 'width': Detection['width'], 'length': Detection['length']},\n 'rotation_y': Detection['position']['rotation_y'],\n },\n 'info': {'weight': Detection['weight']},\n }\n if transformation is not None:\n label['box3D'] = geometry.transform3DBox(label['box3D'], transformation)\n if projection is not None:\n label['box2Duntruncated'] = geometry.project3DBox(label['box3D'], projection)\n if imgShape is not None:\n box2Dtruncated = copy.deepcopy(label['box2Duntruncated'])\n box2Dtruncated['left'] = max(0, 
box2Dtruncated['left'])\n box2Dtruncated['top'] = max(0, box2Dtruncated['top'])\n box2Dtruncated['right'] = min(imgShape[1], box2Dtruncated['right'])\n box2Dtruncated['bottom'] = min(imgShape[0], box2Dtruncated['bottom'])\n label['box2D'] = box2Dtruncated\n\n # calculate truncation\n org_size = (label['box2Duntruncated']['right']-label['box2Duntruncated']['left']) * (label['box2Duntruncated']['bottom']-label['box2Duntruncated']['top'])\n trunc_size = (label['box2D']['right']-label['box2D']['left']) * (label['box2D']['bottom']-label['box2D']['top'])\n if org_size > 0:\n label['info']['truncated'] = 1.0 - (trunc_size / org_size)\n else:\n label['info']['truncated'] = 0.0\n labels.append(label)\n return labels", "def demix_whole_data(Yd, cut_off_point=[0.95,0.9], length_cut=[15,10], th=[2,1], pass_num=1, residual_cut = [0.6,0.6],\n corr_th_fix=0.31, max_allow_neuron_size=0.3, merge_corr_thr=0.6, merge_overlap_thr=0.6, num_plane=1, patch_size=[100,100],\n plot_en=False, TF=False, fudge_factor=1, text=True, bg=False, max_iter=35, max_iter_fin=50,\n update_after=4):\n Yd_min = Yd.min();\n if Yd_min < 0:\n Yd_min_pw = Yd.min(axis=-1, keepdims=True);\n Yd -= Yd_min_pw;\n\n dims = Yd.shape[:2]\n T = Yd.shape[2]\n superpixel_rlt = []\n ## cut image into small parts to find pure superpixels ##\n patch_height = patch_size[0];\n patch_width = patch_size[1];\n height_num = int(np.ceil(dims[0]/patch_height));\n width_num = int(np.ceil(dims[1]/(patch_width*num_plane)));\n num_patch = height_num*width_num;\n patch_ref_mat = np.array(range(num_patch)).reshape(height_num, width_num, order=\"F\");\n\n a, c, b, fb, ff = (None, None, None, None, None)\n for ii in range(pass_num):\n print(f\"Execute #{ii} pass........\");\n if ii > 0:\n if bg:\n Yd_res = reconstruct(Yd, a, c, b, fb, ff);\n else:\n Yd_res = reconstruct(Yd, a, c, b);\n Yt = threshold_data(Yd_res, th=th[ii]);\n else:\n if th[ii] >= 0:\n Yt = threshold_data(Yd, th=th[ii]);\n else:\n Yt = Yd.copy();\n print(\"Get threshould data.....\")\n start = time.time();\n if num_plane > 1:\n connect_mat_1, idx, comps, permute_col = find_superpixel_3d(Yt,num_plane,cut_off_point[ii],length_cut[ii]);\n else:\n connect_mat_1, idx, comps, permute_col = find_superpixel(Yt,cut_off_point[ii],length_cut[ii]);\n print(\"time: \" + str(time.time()-start));\n if idx==0:\n continue\n start = time.time();\n print(\"Initialize A and C components....\")\n if ii > 0:\n c_ini, a_ini, _, _ = spatial_temporal_ini(Yt, comps, idx, length_cut[ii], bg=False);\n else:\n c_ini, a_ini, ff, fb = spatial_temporal_ini(Yt, comps, idx, length_cut[ii], bg=bg);\n print(\"time: \" + str(time.time()-start));\n unique_pix = np.asarray(np.sort(np.unique(connect_mat_1)),dtype=\"int\");\n unique_pix = unique_pix[np.nonzero(unique_pix)];\n brightness_rank_sup = order_superpixels(permute_col, unique_pix, a_ini, c_ini);\n pure_pix = [];\n start = time.time();\n print(\"Find pure superpixels....\")\n for kk in range(num_patch):\n pos = np.where(patch_ref_mat==kk);\n up=pos[0][0]*patch_height;\n down=min(up+patch_height, dims[0]);\n left=pos[1][0]*patch_width;\n right=min(left+patch_width, dims[1]);\n unique_pix_temp, M = search_superpixel_in_range((connect_mat_1.reshape(dims[0],int(dims[1]/num_plane),num_plane,order=\"F\"))[up:down,left:right], permute_col, c_ini);\n pure_pix_temp = fast_sep_nmf(M, M.shape[1], residual_cut[ii]);\n if len(pure_pix_temp)>0:\n pure_pix = np.hstack((pure_pix, unique_pix_temp[pure_pix_temp]));\n pure_pix = np.unique(pure_pix);\n print(\"time: \" + str(time.time()-start));\n 
start = time.time();\n print(\"Prepare iterations....\")\n if ii > 0:\n a_ini, c_ini, brightness_rank = prepare_iteration(Yd_res, connect_mat_1, permute_col, pure_pix, a_ini, c_ini);\n a = np.hstack((a, a_ini));\n c = np.hstack((c, c_ini));\n else:\n a, c, b, normalize_factor, brightness_rank = prepare_iteration(Yd, connect_mat_1, permute_col, pure_pix, a_ini, c_ini, more=True);\n print(\"time: \" + str(time.time()-start));\n if a.size==0:\n continue\n if ii == pass_num - 1:\n maxiter = max_iter_fin;\n else:\n maxiter=max_iter;\n if bg:\n a, c, b, fb, ff, res, corr_img_all_r, num_list = update_AC_bg_l2_Y(Yd.reshape(np.prod(dims),-1,order=\"F\"), normalize_factor, a, c, b, ff, fb, dims,\n corr_th_fix, maxiter=maxiter, tol=1e-8, update_after=update_after,\n merge_corr_thr=merge_corr_thr,merge_overlap_thr=merge_overlap_thr, num_plane=num_plane, max_allow_neuron_size=max_allow_neuron_size);\n else:\n a, c, b, fb, ff, res, corr_img_all_r, num_list = update_AC_l2_Y(Yd.reshape(np.prod(dims),-1,order=\"F\"), normalize_factor, a, c, b, dims,\n corr_th_fix, maxiter=maxiter, tol=1e-8, update_after=update_after,\n merge_corr_thr=merge_corr_thr,merge_overlap_thr=merge_overlap_thr, num_plane=num_plane, max_allow_neuron_size=max_allow_neuron_size);\n superpixel_rlt.append({'connect_mat_1':connect_mat_1, 'pure_pix':pure_pix, 'unique_pix':unique_pix, 'brightness_rank':brightness_rank, 'brightness_rank_sup':brightness_rank_sup});\n if pass_num > 1 and ii == 0:\n rlt = {'a':a, 'c':c, 'b':b, \"fb\":fb, \"ff\":ff};\n\n if (idx==0) & (ii==0):\n fin_rlt = {'a':np.zeros((np.prod(Yd.shape[:2]), 1))};\n else:\n fin_rlt = {'a':a, 'c':c, 'b':b, \"fb\":fb, \"ff\":ff};\n if pass_num > 1:\n return {'rlt':rlt, 'fin_rlt':fin_rlt, \"superpixel_rlt\":superpixel_rlt}\n else:\n return {'fin_rlt':fin_rlt, \"superpixel_rlt\":superpixel_rlt}", "def propagate_labels(image,labels,conflict=0):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels)\n outputs = zeros(amax(rlabels)+1,'i')\n oops = -(1<<30)\n for o,i in cors.T:\n if outputs[o]!=0: outputs[o] = oops\n else: outputs[o] = i\n outputs[outputs==oops] = conflict\n outputs[0] = 0\n return outputs[rlabels]", "def crf_refine(label,\n img,\n crf_theta_slider_value,\n crf_mu_slider_value,\n crf_downsample_factor):\n l_unique = np.unique(label.flatten())#.tolist()\n # print(l_unique)\n scale = 1+(5 * (np.array(img.shape).max() / 3000))\n # print(scale)\n\n Horig = label.shape[0]\n Worig = label.shape[1]\n # crf_downsample_factor = 2\n # decimate by factor by taking only every other row and column\n img = img[::crf_downsample_factor,::crf_downsample_factor, :]\n # do the same for the label image\n label = label[::crf_downsample_factor,::crf_downsample_factor]\n\n orig_mn = np.min(np.array(label).flatten())\n orig_mx = np.max(np.array(label).flatten())\n\n n = 1+(orig_mx-orig_mn)\n\n label = 1+(label - orig_mn)\n # l_unique = np.unique(label.flatten())#.tolist()\n # print(l_unique)\n\n mn = np.min(np.array(label).flatten())\n mx = np.max(np.array(label).flatten())\n\n n = 1+(mx-mn)\n # print(n)\n\n H = label.shape[0]\n W = label.shape[1]\n U = unary_from_labels(label.astype('int'), n, gt_prob=0.9)\n d = dcrf.DenseCRF2D(H, W, n)\n d.setUnaryEnergy(U)\n\n # to add the color-independent term, where features are the locations only:\n d.addPairwiseGaussian(sxy=(3, 3),\n compat=3,\n kernel=dcrf.DIAG_KERNEL,\n normalization=dcrf.NORMALIZE_SYMMETRIC)\n feats = create_pairwise_bilateral(\n sdims=(crf_theta_slider_value, crf_theta_slider_value), #(60, 60),\n # 
schan=(2,2,2,2,2,2), #add these when implement 6 band\n schan=(scale,scale,scale),\n img=img,\n chdim=2)\n\n d.addPairwiseEnergy(feats, compat=crf_mu_slider_value, kernel=dcrf.DIAG_KERNEL,normalization=dcrf.NORMALIZE_SYMMETRIC) #260\n Q = d.inference(10)\n result = 1+np.argmax(Q, axis=0).reshape((H, W)).astype(np.uint8)\n # l_unique = np.bincount(result.flatten())#.tolist()\n # print(l_unique)\n\n result = resize(result, (Horig, Worig), order=0, anti_aliasing=True)\n # l_unique = np.unique(result.flatten())#.tolist()\n # print(l_unique)\n\n result = rescale(result, orig_mn, orig_mx).astype(np.uint8)\n # l_unique = np.bincount(result.flatten())#.tolist()\n # print(l_unique)\n\n print(\"CRF post-processing complete\")\n\n return result, n", "def propagate_labels(image, labels, conflict=0):\n rlabels, _ = ndi.label(image)\n cors = correspondences(rlabels, labels)\n outputs = zeros(amax(rlabels) + 1, 'i')\n oops = -(1 << 30)\n for o, i in cors.T:\n if outputs[o] != 0: outputs[o] = oops\n else: outputs[o] = i\n outputs[outputs == oops] = conflict\n outputs[0] = 0\n return outputs[rlabels]", "def cut_image_into_pieces(sr1, sr2, label, sr1_path, sr2_path,\r\n label_path, stride=None, width_size=None, height_size=None):\r\n print('Loading images!')\r\n sr1 = cv2.imread(sr1)\r\n sr2 = cv2.imread(sr2)\r\n label = cv2.imread(label)\r\n if stride is None or stride == 0:\r\n # stride can not be None!\r\n print('Stride can not be None or zero!')\r\n sys.exit(-1)\r\n if width_size is None or height_size is None:\r\n # width or height size can not be None!\r\n print('width or height size can not be None!')\r\n sys.exit(-1)\r\n h, w, c = sr1.shape # get the shape\r\n height_steps = math.ceil((h - height_size) / stride + 1)\r\n wide_steps = math.ceil((w - width_size) / stride + 1)\r\n if wide_steps is 0 or height_steps is 0:\r\n print('Error, this is because stride equals 1 and image size is one larger than output size.')\r\n sys.exit(-1)\r\n if c == 3:\r\n height_fill = (height_steps - 1) * stride + height_size - h # The number of pixels to fill in the height\r\n wide_fill = (wide_steps - 1) * stride + width_size - w # The number of pixels to fill in the width\r\n # fill the border\r\n sr1 = cv2.copyMakeBorder(sr1, 0, height_fill, 0, wide_fill, cv2.BORDER_CONSTANT, value=[0, 0, 0])\r\n sr2 = cv2.copyMakeBorder(sr2, 0, height_fill, 0, wide_fill, cv2.BORDER_CONSTANT, value=[0, 0, 0])\r\n label = cv2.copyMakeBorder(label, 0, height_fill, 0, wide_fill, cv2.BORDER_CONSTANT, value=[0])\r\n print('Cutting images!')\r\n for i in range(height_steps):\r\n for j in range(wide_steps):\r\n label_change = label[i * stride:i * stride + height_size, j * stride:j * stride + width_size]\r\n sr1_pieces = sr1[i * stride:i * stride + height_size, j * stride:j * stride + width_size, :]\r\n sr2_pieces = sr2[i * stride:i * stride + height_size, j * stride:j * stride + width_size, :]\r\n cv2.imwrite(sr1_path + '/' + str(i) + '_' + str(j) + '.tif', sr1_pieces)\r\n cv2.imwrite(sr2_path + '/' + str(i) + '_' + str(j) + '.tif', sr2_pieces)\r\n cv2.imwrite(label_path + '/' + str(i) + '_' + str(j) + '.tif', label_change)\r\n print('Cut completely!')\r\n else:\r\n # program only support 3 channels now!\r\n print('Not support numbers of chanel except 1 and 3!')\r\n sys.exit(-1)", "def extend_labeled_region(sample, insertL = 500):\n labels = set(sample['labels'])\n \n for j in list(labels)[1:]:\n try:\n inds = np.hstack(np.where(sample['labels'] == j)[0])\n inds = np.insert(inds,0,np.arange(inds[0]-insertL, inds[0]))\n jumps = 
np.hstack(np.where(np.diff(inds) > 2)[0])\n for jump in jumps:\n refLow = inds[jump-1]\n refHigh = inds[jump+1]\n #insert at the end of the first region\n inds = np.insert(inds, jump-1, np.arange(inds[jump-1], inds[jump-1] + insertL))\n #insert at the beginning of the second region\n inds = np.insert(inds, jump+1 + insertL, np.arange(inds[jump+1+insertL] - insertL, inds[jump+1+insertL]))\n #insert at the end of the second region\n inds = np.insert(inds, len(inds), np.arange(inds[-1] , inds[-1] + insertL)) \n sample['labels'].values[inds] = j\n except:\n print(f'no label = {j}')\n\n return sample", "def montage(\n images,\n padding=2,\n pad_color=(0., 0., 0.),\n labels=None,\n label_color=(1, 1, 1)\n):\n if labels:\n images = [\n labeled(img, lbl, text=label_color, background=pad_color)\n for img, lbl in zip(images, labels)\n ]\n\n if padding:\n images = [ frame(img, size=padding, color=pad_color) for img in images ]\n\n sqw = int(math.ceil(len(images)**0.5))\n sqh = sqw\n while len(images) <= sqw * sqh - sqw:\n sqh -= 1\n\n rows = []\n idx = 0\n for i in range(sqh):\n rows.append([])\n for j in range(sqw):\n rows[-1].append(images[idx])\n idx += 1\n if idx >= len(images):\n break\n\n if idx >= len(images):\n break\n\n rowimages = []\n for r in rows:\n rowimages.append(join(r, vertical=False, pad_color=pad_color))\n\n return join(\n [ join(r, vertical=False, pad_color=pad_color) for r in rows ],\n vertical=True,\n pad_color=pad_color\n )", "def split_labels(self):\r\n data = self.labels_df\r\n \r\n thirty_per = (0.3 * len(data))\r\n mid_min = ((len(data)/2) - (0.5*thirty_per))\r\n mid_plus = ((len(data)/2) + (0.5*thirty_per))\r\n data_test = data.iloc[int(mid_min) : int(mid_plus)]\r\n \r\n data_new = data.merge(data_test, how ='left', indicator=True)\r\n data_train = data_new[data_new['_merge'] == 'left_only']\r\n data_train = data_train.drop(columns = '_merge')\r\n \r\n self.labels_train = data_train\r\n self.labels_test = data_test.iloc[:,0]", "def hed_pad(lim, r_stride=320, bsds_path='/media/data_cifs/image_datasets/hed_bsds/HED-BSDS', process_label=False):\n if process_label:\n lim /= 255.\n if len(lim.shape) == 3:\n lim = lim[..., 0]\n lim = lim[..., None]\n lsh = lim.shape\n if lsh[0] > lsh[1]:\n # Flip all to landscape\n lim = lim.transpose((1, 0, 2))\n lsh = lim.shape\n if lsh[0] < r_stride:\n # Pad to 320\n up_offset = (r_stride - lsh[0]) // 2\n down_offset = up_offset\n if up_offset + down_offset + lsh[0] < r_stride:\n down_offset += 1\n elif up_offset + down_offset + lsh[0] > r_stride:\n down_offset -= 1\n pad_up_offset = np.zeros((up_offset, lsh[1], lsh[-1]))\n pad_down_offset = np.zeros((down_offset, lsh[1], lsh[-1]))\n lim = np.concatenate((pad_up_offset, lim, pad_down_offset), 0)\n if lsh[1] < r_stride:\n # Pad to 320\n up_offset = (r_stride - lsh[1]) // 2\n down_offset = up_offset\n if up_offset + down_offset + lsh[1] < r_stride:\n down_offset += 1\n elif up_offset + down_offset + lsh[1] > r_stride:\n down_offset -= 1\n pad_up_offset = np.zeros((lsh[0], up_offset, lsh[-1]))\n pad_down_offset = np.zeros((lsh[0], down_offset, lsh[-1]))\n lim = np.concatenate((pad_up_offset, lim, pad_down_offset), 1)\n return lim" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dilate grains to fill the gap between them. This function calls `dilate_labels` with the grain map of the microstructure.
def dilate_grains(self, dilation_steps=1, dilation_ids=None):
    if not hasattr(self, 'grain_map'):
        raise ValueError('microstructure %s must have an associated grain_map attribute' % self.name)
        return
    grain_map = self.grain_map.copy()
    # get rid of overlap regions flaged by -1
    grain_map[grain_map == -1] = 0
    if hasattr(self, 'mask'):
        grain_map = Microstructure.dilate_labels(grain_map, dilation_steps=dilation_steps, mask=self.mask, dilation_ids=dilation_ids)
    else:
        grain_map = Microstructure.dilate_labels(grain_map, dilation_steps=dilation_steps, dilation_ids=dilation_ids)
    # finally assign the dilated grain map to the microstructure
    self.grain_map = grain_map
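A hypothetical usage sketch, assuming `micro` is a Microstructure instance that already carries a grain_map (and optionally a mask); the object name is invented, the parameters are the ones in the signature above:

# grow all grains by two voxel shells, staying inside the mask if one is set
micro.dilate_grains(dilation_steps=2)
# only let selected grains grow:
# micro.dilate_grains(dilation_steps=1, dilation_ids=[4, 7])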
[ "def dilate_grain(self, grain_id, dilation_steps=1, use_mask=False):\n grain_volume_init = (self.grain_map == grain_id).sum()\n grain_data = self.grain_map == grain_id\n grain_data = ndimage.binary_dilation(grain_data, iterations=dilation_steps).astype(np.uint8)\n if use_mask and hasattr(self, 'mask'):\n grain_data *= self.mask.astype(np.uint8)\n self.grain_map[grain_data == 1] = grain_id\n grain_volume_final = (self.grain_map == grain_id).sum()\n print('grain %s was dilated by %d voxels' % (grain_id, grain_volume_final - grain_volume_init))", "def gap_fill(mcdData, clon, clat, FRLAND, season, ncClim):\n nNS, nEW = clon.shape \n for b in bands:\n bdata = getattr(mcdData,b)\n sdata = ncClim.variables[season + '_' + b][:]\n\n data = np.ma.masked_all([nNS,nEW])\n data[~clon.mask] = bdata \n data[data > 32] = sdata[data > 32]\n data[FRLAND< 0.99] = np.ma.masked\n\n setattr(mcdData,b,data[~clon.mask])", "def recompute_grain_centers(self, verbose=False):\n if not hasattr(self, 'grain_map'):\n print('warning: need a grain map to recompute the center of mass of the grains')\n return\n for g in self.grains:\n try:\n com = self.compute_grain_center(g.id)\n except ValueError:\n print('skipping grain %d' % g.id)\n continue\n if verbose:\n print('grain %d center: %.3f, %.3f, %.3f' % (g.id, com[0], com[1], com[2]))\n g.center = com", "def gap_fill_temporal(rasterlist, outdir = False):\n\n #enforce the list of rasters to ensure it's sanitized\n rasterlist = core.enf_filelist(rasterlist)\n\n #create an empty list to store output arrays in\n arr_list = []\n\n #convert each raster in the input list to an array, and save its data to the list\n for i, raster in enumerate(rasterlist):\n item = rasterlist[i]\n item_arr = to_numpy(item)\n arr_list.append(item_arr[0].data)\n\n #convert the list to a numpy array\n arr_list = np.array(arr_list)\n\n #set the lists of ranges of values for each dimension of the output array\n xrange = range(0, np.shape(arr_list)[2])\n yrange = range(0, np.shape(arr_list)[1])\n zrange = range(0, np.shape(arr_list)[0])\n\n #pull out the first array to be edited\n new_arr = arr_list[0]\n\n #loop through each x, y value\n #if the first array's value at each location is \"Not A Number\",\n #attempt to fill it with the corresponding value from the next array\n #if no array has a corresponding value, it will be left as a \"nan\"\n for i in yrange:\n for j in xrange:\n if np.isnan(new_arr[i,j]) == True:\n x = 1\n while x <= zrange[-1]:\n if np.isnan(arr_list[x,i,j]) == False:\n new_arr[i,j] = arr_list[x,i,j]\n break\n x = x + 1\n\n #separate the filename from the first input array \n inname = os.path.splitext(rasterlist[0])[0]\n\n #create an output name\n if outdir:\n outdir = os.path.abspath(outdir)\n name = \"{0}_gapfilled.tif\".format(os.path.split(inname)[1])\n outname = os.path.join(outdir, name)\n else:\n outname = \"{0}_gapfilled.tif\".format(inname)\n\n #convert the edited array to a tiff\n from_numpy(new_arr, item_arr[1], outname, \"NoData\")\n\n return outname", "def extend_labeled_region(sample, insertL = 500):\n labels = set(sample['labels'])\n \n for j in list(labels)[1:]:\n try:\n inds = np.hstack(np.where(sample['labels'] == j)[0])\n inds = np.insert(inds,0,np.arange(inds[0]-insertL, inds[0]))\n jumps = np.hstack(np.where(np.diff(inds) > 2)[0])\n for jump in jumps:\n refLow = inds[jump-1]\n refHigh = inds[jump+1]\n #insert at the end of the first region\n inds = np.insert(inds, jump-1, np.arange(inds[jump-1], inds[jump-1] + insertL))\n #insert at the beginning of the second 
region\n inds = np.insert(inds, jump+1 + insertL, np.arange(inds[jump+1+insertL] - insertL, inds[jump+1+insertL]))\n #insert at the end of the second region\n inds = np.insert(inds, len(inds), np.arange(inds[-1] , inds[-1] + insertL)) \n sample['labels'].values[inds] = j\n except:\n print(f'no label = {j}')\n\n return sample", "def disag_upsample(Load, disag_profile, to_offset='h'):\n #First reindexing to the new resolution.\n orig_freq = Load.index.freqstr\n start = Load.index[0]\n end = Load.index[-1] + 1 * Load.index.freq #An extra period is needed at the end to match the sum FIXME\n df1 = Load.reindex(pd.date_range(start, end, freq=to_offset, closed='left'))\n\n def mult_profile(x, profile):\n #Normalizing to keep the sum the same..\n profile = profile / np.sum(profile)\n return x.mean() * profile #using mean() assuming that there is one value and the rest is nan\n\n #then transform per sampled period correspnding to the len(disag_profile)\n return df1.resample(orig_freq).transform(mult_profile, disag_profile).dropna()", "def fill_gaps(self):\n frame_gaps, time_gaps = self.get_frame_gaps()\n max_skip_index = int(np.nanargmax(time_gaps))\n n = frame_gaps[max_skip_index]\n if n == 0:\n return\n if n > 10:\n raise ValueError(\n f\"Large gap of {n} frames at \"\n f\"index {self.frames.fixed_index[max_skip_index]}, \"\n f\"MJD: {self.frames.mjd[max_skip_index]}\")\n\n add_frames = np.clip(frame_gaps, 0, None)\n log.debug(f\"Padding with {add_frames.sum()} empty frames.\")\n\n insert_at = np.nonzero(add_frames)[0]\n insert_indices = []\n for ii in insert_at:\n insert_indices.extend([ii] * add_frames[ii])\n\n insert_indices = np.asarray(insert_indices, dtype=int)\n self.frames.insert_blanks(insert_indices)\n\n # Add bad MJDs so no further blanks are inserted\n inserted_indices = insert_indices + np.arange(insert_indices.size)\n self.frames.mjd[inserted_indices] = np.nan\n self.reindex()", "def rigid_fill(Data,static):\n data = Data.copy()\n missingMarkerName=targetName()\n targetDict = target_dict()\n segmentDict = segment_dict()\n\n missings={}\n\n #Need to do something like this to avoid issues with CGM variants\n # missingMarkerName.remove('LPSI')\n # missingMarkerName.remove('RPSI')\n # missingMarkerName.remove('SACR')\n\n removedMarkers = [name for name in missingMarkerName if name not in data.keys()]\n\n for key in removedMarkers:\n #data[key] = np.empty(shape=(len(data[data.keys()[0]]),3))*np.nan\n data[key] = np.empty(shape=(len(data[list(data.keys())[0]]),3))*np.nan\n\n #always use transform from static for removed markers (new one for every \n #frame)\n if pyver == 2:\n forIter = data.iteritems()\n if pyver == 3:\n forIter = data.items()\n\n for key, val in forIter:\n if key in missingMarkerName and key in removedMarkers:\n traj = data[key]\n\n for i, val in enumerate(traj):\n useables = segmentFinder(key,data,targetDict,segmentDict,i,missings)\n\n if len(useables) < 3:\n print('Cannot reconstruct',key,': no valid cluster')\n continue\n else:\n data[key][i] = transform_from_static(data,static,key,useables,i)\n # try: data[key][i] = transform_from_static(data,static,key,useables,i)\n # except: pass #key might not be used which is why it is missing i.e., LPSI vs SACR\n\n #use last known marker position (start of every gap) for transform\n #during movement trial gaps\n if key in missingMarkerName and key not in removedMarkers:\n traj = data[key]\n gap_bool = False\n last_time = None\n\n missings[key] = []\n for i, val in enumerate(traj):\n if not np.isnan(val[0]):\n gap_bool = False\n 
last_time = None\n\n continue\n\n if not gap_bool:\n gap_bool = True\n j = i\n\n while j >=0:\n if np.isnan(data[key][j][0]):\n j -= 1\n continue\n\n useables_last = segmentFinder(key,data,targetDict,segmentDict,j,missings)\n\n if len(useables_last) < 3:\n j-=1\n continue\n\n last_time = j\n\n break\n\n #print('The target marker',key,' was visible at',last_time)\n\n if last_time:\n #if np.isnan(data[useables[0]][i][0]) or np.isnan(data[useables[1]][i][0]) or np.isnan(data[useables[2]][i][0]):\n #print('current clust',useables,'invalid for',key,'at frame',i)\n useables_current = segmentFinder(key,data,targetDict,segmentDict,i,missings)\n useables = list(set(useables_last).intersection(useables_current))\n\n if len(useables) < 3:\n print('Not enough cluster markers')\n\n opts = []\n perms = list(itertools.permutations(useables))\n\n for p in perms:\n subset = list(p)\n try:\n est_pos = transform_from_mov(data,key,subset,last_time,i)\n opts.append([subset,np.mean(abs(est_pos - data[key][last_time]))])\n except: pass\n\n useables = min(opts, key = lambda t: t[1])[0]\n\n #print('using new clust',useables,'for key')\n data[key][i] = transform_from_mov(data,key,useables,last_time,i)\n continue\n\n #use static transform for markers missing from the start\n #of the trial only. Make new one for each missing frame.\n if not last_time:\n useables = segmentFinder(key,data,targetDict,segmentDict,i,missings)\n\n if len(useables) < 3:\n print('cannot find valid cluster for',key)\n continue\n\n data[key][i] = transform_from_static(data,static,key,useables,i)\n\n #print transform_from_static(data,static,key,useables,i)\n #record reconstructed frames\n missings[key].append(i)\n\n return data", "def dilate_labels(array, dilation_steps=1, mask=None, dilation_ids=None, struct=None):\n from scipy import ndimage\n if struct is None:\n struct = ndimage.morphology.generate_binary_structure(array.ndim, 1)\n assert struct.ndim == array.ndim\n # carry out dilation in iterative steps\n for step in range(dilation_steps):\n if dilation_ids:\n grains = np.isin(array, dilation_ids)\n else:\n grains = (array > 0).astype(np.uint8)\n grains_dil = ndimage.morphology.binary_dilation(grains, structure=struct).astype(np.uint8)\n if mask is not None:\n # only dilate within the mask\n grains_dil *= mask.astype(np.uint8)\n todo = (grains_dil - grains)\n # get the list of voxel for this dilation step\n if array.ndim == 2:\n X, Y = np.where(todo)\n else:\n X, Y, Z = np.where(todo)\n\n xstart = X - 1\n xend = X + 1\n ystart = Y - 1\n yend = Y + 1\n\n # check bounds\n xstart[xstart < 0] = 0\n ystart[ystart < 0] = 0\n xend[xend > array.shape[0] - 1] = array.shape[0] - 1\n yend[yend > array.shape[1] - 1] = array.shape[1] - 1\n if array.ndim == 3:\n zstart = Z - 1\n zend = Z + 1\n zstart[zstart < 0] = 0\n zend[zend > array.shape[2] - 1] = array.shape[2] - 1\n\n dilation = np.zeros_like(X).astype(np.int16)\n print('%d voxels to replace' % len(X))\n for i in range(len(X)):\n if array.ndim == 2:\n neighbours = array[xstart[i]:xend[i] + 1, ystart[i]:yend[i] + 1]\n else:\n neighbours = array[xstart[i]:xend[i] + 1, ystart[i]:yend[i] + 1, zstart[i]:zend[i] + 1]\n if np.any(neighbours):\n # at least one neighboring voxel in non zero\n dilation[i] = min(neighbours[neighbours > 0])\n if array.ndim == 2:\n array[X, Y] = dilation\n else:\n array[X, Y, Z] = dilation\n print('dilation step %d done' % (step + 1))\n return array", "def create_garden(self):\n\n counter = 0\n while self.garden_area < self.garden_area_target:\n zero = nullgard = zone = 
zapped = clip = ring_suitability = new_cells = None\n # self.wipe_locks()\n counter += 1\n if s.DEBUG_MODE:\n logging.info('counter: %s' % counter)\n logging.info('garden area pre create_garden: %s' % self.garden_area)\n\n # Set nodata values in garden grid to 0\n zero = arcpy.sa.Con(arcpy.sa.IsNull(self.garden) == 1, 0, self.garden)\n # if s.DEBUG_MODE:\n zero.save(os.path.join(s.TEMP_DIR, \"zero_%s.tif\" % counter))\n\n # Create another grid where current garden is NODATA and all other values = 0\n nullgard = arcpy.sa.SetNull(zero == s.GARDEN_ID, 0)\n # if s.DEBUG_MODE:\n nullgard.save(os.path.join(s.TEMP_DIR, \"nullgard_%s.tif\" % counter))\n\n # Expand potential garden grid by one cell\n zone = arcpy.sa.Expand(self.garden, 1, s.GARDEN_ID)\n # if s.DEBUG_MODE:\n zone.save(os.path.join(s.TEMP_DIR, \"zone_%s.tif\" % counter))\n\n # Create a clipping raster for gardens\n zapped = arcpy.sa.Plus(nullgard, self.local_suitability)\n # if s.DEBUG_MODE:\n zapped.save(os.path.join(s.TEMP_DIR, \"zapped_%s.tif\" % counter))\n\n # Clip expanded garden grid by removing unsuitable areas and places where garden currently exists\n clip = arcpy.sa.ExtractByMask(zone, zapped)\n array = arcpy.RasterToNumPyArray(clip)\n unique_values = numpy.unique(array, return_counts=True)\n value_dict = dict(zip(unique_values[0], unique_values[1]))\n if s.GARDEN_ID not in value_dict.keys():\n logging.info('no new cells can be added')\n break\n\n # if s.DEBUG_MODE:\n clip.save(os.path.join(s.TEMP_DIR, 'clip_%s.tif' % counter))\n\n ring_suitability = arcpy.sa.Con(clip, self.local_suitability)\n # if s.DEBUG_MODE:\n ring_suitability.save(os.path.join(s.TEMP_DIR, 'ring_suitability_%s.tif' % counter))\n\n new_cells = arcpy.sa.Con(ring_suitability == ring_suitability.maximum, s.GARDEN_ID)\n # if s.DEBUG_MODE:\n new_cells.save(os.path.join(s.TEMP_DIR, 'new_cells_%s.tif' % counter))\n\n new_cells_area = self.get_garden_area(new_cells)\n\n if (new_cells_area + self.garden_area) <= self.garden_area_target:\n self.garden = arcpy.sa.Con(zero == s.GARDEN_ID, s.GARDEN_ID,\n arcpy.sa.Con(new_cells == s.GARDEN_ID, s.GARDEN_ID, self.garden))\n\n else:\n random_cells = arcpy.sa.Con(new_cells, self.randrast)\n array = arcpy.RasterToNumPyArray(random_cells)\n random_values = numpy.unique(array).tolist()\n random.shuffle(random_values)\n\n while self.garden_area < self.garden_area_target:\n r = random_values.pop()\n new_cell = arcpy.sa.Con(random_cells == r, s.GARDEN_ID)\n\n self.garden = arcpy.sa.Con(arcpy.sa.IsNull(new_cell) == 0, new_cell, self.garden)\n\n self.garden_area = self.get_garden_area(self.garden)\n # if s.DEBUG_MODE:\n new_cell.save(os.path.join(s.TEMP_DIR, 'new_cell_%s.tif' % counter))\n\n self.garden_area = self.get_garden_area(self.garden)\n del zero, zone, zapped, nullgard, clip, ring_suitability, new_cells\n # utils.clear_dir_by_pattern(s.TEMP_DIR, '.cpg')\n # utils.clear_dir_by_pattern(self.OUTPUT_DIR, '.cpg')\n if s.DEBUG_MODE:\n logging.info('finished create_garden {}'.format(counter))\n\n path_garden = os.path.join(self.OUTPUT_DIR, 'garden_{}.tif'.format(self.year))\n self.garden.save(path_garden)\n logging.info('finished create_garden: {}'.format(self.garden))", "def a2744_glassdatareduction(workdir='/Users/kschmidt/work/JWST/grizly_A2744/Prep',clobber=True):\n print(' - Reducing A2744 GLASS data using ')\n print(' - Grizli version '+grizli.__version__)\n try:\n os.chdir(workdir)\n print(' - Moved to directory '+os.getcwd())\n except:\n sys.exit('Working directory '+workdir+' does not exists')\n\n files = 
glob.glob('../RAW/*flt.fits')\n info = grizli.utils.get_flt_info(files)\n ascii.write(info,'./rawFLTfileinfo.txt')\n visits, filters = grizli.utils.parse_flt_files(info=info, use_visit=False, uniquename=True)\n\n for i in range(len(visits)):\n print(dict(visits[i]))\n\n gotoextraction = True # <------------------------------------------------------------------- Keyword for skipping\n if not gotoextraction:\n print(' ----------------------- Master Pre-processing ----------------------- ')\n from grizli.prep import process_direct_grism_visit\n print(' - defining visit pairs to process (NEEDS TO BE UPDATED TO INCLUE ALL OBS)')\n visitpairs = [[visits[0],visits[4]],[visits[2],visits[6]],[visits[7],visits[11]], [visits[9],visits[13]]]\n\n runmaster = False # <--------------------------------------------------------------------- Keyword for skipping\n if runmaster:\n for vv, visit in enumerate(visits):\n print(' ----- Visit No. '+str(vv+1)+' ----- ')\n visitfiles = visits[vv]['files']\n print(' Files: '+str(visitfiles))\n for vf in visitfiles:\n infoent = np.where(info['FILE'] == vf)[0]\n print(' '+vf+' infofilter = '+str(info['FILTER'][infoent][0]))\n\n for vp in visitpairs:\n status = process_direct_grism_visit(direct=vp[0], grism=vp[1],\n radec='../hst_a2744_60mas_merged_radec_F140Wlt24.txt',\n align_mag_limits=[14,23])\n else:\n print('Skipping master pre-processing')\n\n shiftlogs = glob.glob('*shifts.log')\n print(' - For info on shifts, check '+''.join(shiftlogs))\n # !ls *shifts.log\n # print('')\n # !cat *shifts.log\n\n print(' ----------------------- Grouping FLTs ----------------------- ')\n all_grism_files = []\n grismvisits = [vp[1] for vp in visitpairs]\n for i in range(len(grismvisits)):\n all_grism_files.extend(grismvisits[i]['files'])\n\n print(' - Grism files (all_grism_files) to group: '+str(all_grism_files))\n refimgpath = '/Users/kschmidt/work/images_MAST/images_fullfov/'\n grp = GroupFLT(grism_files=all_grism_files, direct_files=[],\n ref_file=refimgpath+'refimage_hlsp_frontier_hst_wfc3-60mas_abell2744_f140w_v1.0_drz.fits',\n seg_file=refimgpath+'refimage_hlsp_frontier_hst_wfc3-60mas_abell2744_f140w_v1.0_drz_seg.fits',\n catalog='/Users/kschmidt/work/catalogs/GLASScatalogs/GLASScatalog_A2744_150515.cat',\n cpu_count=8)\n\n\n print(' ----------------------- Generate Continuum Model ----------------------- ')\n genmodels = False # <------------------------------------------------------------------- Keyword for skipping\n if genmodels:\n grp.compute_full_model(mag_limit=25)\n\n print(' - Plotting: Show FLT residuals')\n fig = plt.figure(figsize=[12,6])\n ax = fig.add_subplot(121)\n ax.imshow(grp.FLTs[0].grism['SCI'] - grp.FLTs[0].model, vmin=-0.02, vmax=0.2, cmap='cubehelix_r',\n interpolation='Nearest', origin='lower')\n ax.set_title('G102, %s' %(grp.FLTs[0].grism.parent_file))\n\n ax = fig.add_subplot(122)\n ax.imshow(grp.FLTs[4].grism['SCI'] - grp.FLTs[4].model, vmin=-0.02, vmax=0.2, cmap='cubehelix_r',\n interpolation='Nearest', origin='lower')\n ax.set_title('G141, %s' %(grp.FLTs[4].grism.parent_file))\n\n for ax in fig.axes:\n #ax.set_xlim(500,700); ax.set_ylim(500,700)\n ax.set_xlim(100,1500); ax.set_ylim(100,1500)\n fig.savefig('./FLTresiduals_continuum_model.pdf')\n\n else:\n print('Skipping continuum model')\n\n\n print(' ----------------------- Generate Polynomial Model ----------------------- ')\n if genmodels:\n grp.refine_list(poly_order=2, mag_limits=[16, 24], verbose=False)\n\n\n print(' - Plotting: Show FLT residuals')\n fig = plt.figure(figsize=[12,6])\n ax = 
fig.add_subplot(121)\n ax.imshow(grp.FLTs[0].grism['SCI'] - grp.FLTs[0].model, vmin=-0.02, vmax=0.2, cmap='cubehelix_r',\n interpolation='Nearest', origin='lower')\n ax.set_title('G102, %s' %(grp.FLTs[0].grism.parent_file))\n\n ax = fig.add_subplot(122)\n ax.imshow(grp.FLTs[4].grism['SCI'] - grp.FLTs[4].model, vmin=-0.02, vmax=0.2, cmap='cubehelix_r',\n interpolation='Nearest', origin='lower')\n ax.set_title('G141, %s' %(grp.FLTs[4].grism.parent_file))\n\n for ax in fig.axes:\n #ax.set_xlim(500,700); ax.set_ylim(500,700)\n ax.set_xlim(100,1500); ax.set_ylim(100,1500)\n fig.savefig('./FLTresiduals_polynomial_model.pdf')\n else:\n print('Skipping Polynomial model')\n\n\n print(' ----------------------- Save Models ----------------------- ')\n if genmodels:\n grp.save_full_data()\n else:\n print(' did not generate models, so did not save models to disk')\n\n else:\n print('\\n NB\\n - Going directly to spectral extraction...')\n\n print(' ----------------------- Prepare Fitting Spectra ----------------------- ')\n all_grism_files = ['ica501u3q_flt.fits', 'ica501uaq_flt.fits', 'ica501uhq_flt.fits', 'ica501uoq_flt.fits',\n 'ica501tbq_flt.fits', 'ica501tiq_flt.fits', 'ica501tpq_flt.fits', 'ica501twq_flt.fits',\n 'ica503fwq_flt.fits', 'ica503g3q_flt.fits', 'ica503gaq_flt.fits', 'ica503ghq_flt.fits',\n 'ica503ecq_flt.fits', 'ica503ejq_flt.fits', 'ica503eqq_flt.fits', 'ica503f5q_flt.fits']\n\n print(' - Grism files (all_grism_files) to group: '+str(all_grism_files))\n refimgpath = '/Users/kschmidt/work/images_MAST/images_fullfov/'\n grp = GroupFLT(grism_files=all_grism_files, direct_files=[],\n ref_file=refimgpath+'refimage_hlsp_frontier_hst_wfc3-60mas_abell2744_f140w_v1.0_drz.fits',\n seg_file=refimgpath+'refimage_hlsp_frontier_hst_wfc3-60mas_abell2744_f140w_v1.0_drz_seg.fits',\n catalog='/Users/kschmidt/work/catalogs/GLASScatalogs/GLASScatalog_A2744_150515.cat',\n cpu_count=8)\n\n print(' ----------------------- Setting up templates for fits ----------------------- ')\n # First is set with combined emission line complexes for the redshift fit\n # (don't allow infinite freedom) of the line ratios / fluxes\n templ0 = grizli.utils.load_templates(fwhm=1200, line_complexes=True, stars=False,\n full_line_list=None, continuum_list=None,\n fsps_templates=True)\n\n # Second set has individual line templates for fitting the line fluxes\n templ1 = grizli.utils.load_templates(fwhm=1200, line_complexes=False, stars=False,\n full_line_list=None, continuum_list=None,\n fsps_templates=True)\n\n # Show the template names / dictionary keys\n fmt = '{0:<36s} {1:<36s}'\n print(fmt.format('templ0', 'templ1'))\n print(fmt.format('------', '------'))\n\n for i in range(len(templ1)):\n if i > len(templ0)-1:\n print(fmt.format('', list(templ1.keys())[i]))\n else:\n print(fmt.format(list(templ0.keys())[i], list(templ1.keys())[i]))\n\n # Parameters for drizzled line maps\n pline = {'kernel': 'point', 'pixfrac': 0.2, 'pixscale': 0.1, 'size': 8, 'wcs': None}\n\n\n print(' ----------------------- Pull out individual objects ----------------------- ')\n # grp `GroupFLT` object created as defined in WFC3IR_Reduction from the WFC3 ERS grism data\n target = 'glass_a2744'\n\n # ELs Cont Cont\n ids = [161, 316, 694]\n\n for id in ids:\n # Pull out the 2D cutouts\n beams = grp.get_beams(id, size=80)\n mb = grizli.multifit.MultiBeam(beams, fcontam=0.5, group_name=target, psf=False)\n\n # Save a FITS file with the 2D cutouts (beams) from the individual exposures\n mb.write_master_fits()\n\n # Fit polynomial model for initial continuum 
subtraction\n wave = np.linspace(2000,2.5e4,100)\n poly_templates = grizli.utils.polynomial_templates(wave, order=7)\n pfit = mb.template_at_z(z=0, templates=poly_templates, fit_background=True,\n fitter='lstsq', get_uncertainties=2)\n\n # Drizzle grisms / PAs\n hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=0.2, flambda=False, kernel='point',\n size=32, zfit=pfit)\n\n # Save drizzle figure FITS file\n fig.savefig('{0}_{1:05d}.stack.png'.format(target, id))\n hdu.writeto('{0}_{1:05d}.stack.fits'.format(target, id), clobber=True)\n\n\n print(' ----------------------- Run wrapper on object '+str(id)+'----------------------- ')\n # High-level wrapper script for doing everything (redshift fits, line fluxes, drizzled line\n # maps). More explanation of the details of individual steps TBD.\n #\n # Needs to be able to find {target}_{id:05d}.beams.fits and {target}_{id:05d}.stack.fits\n # generated above\n out = grizli.fitting.run_all(id, t0=templ0, t1=templ1, fwhm=1200,\n zr=[0.1, 1.7], dz=[0.004, 0.0005],\n fitter='nnls', group_name=target, prior=None, fcontam=0.,\n pline=pline, mask_sn_limit=7, fit_beams=True, fit_stacks=False,\n root=target+'_', fit_trace_shift=False, verbose=False,\n phot=None, scale_photometry=False, show_beams=True)\n\n\n print(\" - For analyzing fit output see\\n\"\n \" https://github.com/gbrammer/grizli/blob/master/examples/NewSpectrumFits.ipynb\")", "def sector_disaggregation_generalized(fbs, group_cols):\n\n # load naics 2 to naics 6 crosswalk\n cw_load = load_sector_length_crosswalk_w_nonnaics()\n\n # for loop min length to 6 digits\n length = min(fbs['Sector'].apply(lambda x: len(x)).unique())\n # appends missing naics levels to df\n for i in range(length, 6):\n\n sector_merge = 'NAICS_' + str(i)\n sector_add = 'NAICS_' + str(i+1)\n\n # subset the df by naics length\n cw = cw_load[[sector_merge, sector_add]]\n # only keep the rows where there is only one value in sector_add for a value in sector_merge\n cw = cw.drop_duplicates(subset=[sector_merge], keep=False).reset_index(drop=True)\n sector_list = cw[sector_merge].values.tolist()\n\n # subset df to sectors with length = i and length = i + 1\n df_subset = fbs[fbs['Sector'].apply(lambda x: i + 1 >= len(x) >= i)]\n # create new columns that are length i\n df_subset = df_subset.assign(Sector_tmp=df_subset['Sector'].apply(lambda x: x[0:i]))\n # subset the df to the rows where the tmp sector columns are in naics list\n df_subset = df_subset.loc[df_subset['Sector_tmp'].isin(sector_list)]\n # drop all rows with duplicate temp values, as a less aggregated naics exists\n group_cols = [e for e in group_cols if e not in ('Sector')]\n group_cols.append('Sector_tmp')\n df_subset2 = df_subset.drop_duplicates(subset=group_cols,\n keep=False).reset_index(drop=True)\n # merge the naics cw\n new_naics = pd.merge(df_subset2, cw[[sector_merge, sector_add]],\n how='left', left_on=['Sector_tmp'], right_on=[sector_merge])\n # add column counting the number of child naics associated with a parent\n new_naics = new_naics.assign(sector_count=new_naics.groupby(['Location', 'Sector_tmp'])['Sector_tmp'].transform('count'))\n # only keep the rows where the count is 1\n new_naics2 = new_naics[new_naics['sector_count'] == 1]\n del new_naics2['sector_count']\n # issue warning if rows with more than one child naics that get dropped - will need method of estimation\n missing_naics = new_naics[new_naics['sector_count'] > 1]\n if len(missing_naics) > 0:\n missing_naics = missing_naics[['Location', 'Sector']].values.tolist()\n log.warning('There is data at 
sector length ' + str(i) + ' that is lost at sector length ' + str(i+1) +\n ' for ' + str(missing_naics))\n new_naics2 = new_naics2.rename(columns={sector_add: \"ST\"})\n new_naics2 = new_naics2.drop(columns=[sector_merge])\n # drop columns and rename new sector columns\n new_naics2 = new_naics2.drop(columns=[\"Sector\", \"Sector_tmp\"])\n new_naics2 = new_naics2.rename(columns={\"ST\": \"Sector\"})\n # append new naics to df\n if len(new_naics2) > 1:\n fbs = pd.concat([fbs, new_naics2], sort=True)\n\n return fbs", "def from_dct(label=1, data_dir='.'):\n grain_path = os.path.join(data_dir, '4_grains', 'phase_01', 'grain_%04d.mat' % label)\n grain_info = h5py.File(grain_path)\n g = Grain(label, Orientation.from_rodrigues(grain_info['R_vector'].value))\n g.center = grain_info['center'].value\n # add spatial representation of the grain if reconstruction is available\n grain_map_path = os.path.join(data_dir, '5_reconstruction', 'phase_01_vol.mat')\n if os.path.exists(grain_map_path):\n with h5py.File(grain_map_path, 'r') as f:\n # because how matlab writes the data, we need to swap X and Z axes in the DCT volume\n vol = f['vol'].value.transpose(2, 1, 0)\n from scipy import ndimage\n grain_data = vol[ndimage.find_objects(vol == label)[0]]\n g.volume = ndimage.measurements.sum(vol == label)\n # create the vtk representation of the grain\n g.add_vtk_mesh(grain_data, contour=False)\n return g", "def fill_missing_average6(df_missing, df):\n l_ind = get_xyz_ranges(df)\n for cell in df_missing.index.tolist():\n fill_cell_neighbours(df, cell, l_ind)\n return df", "def augmentation(dataset, labels):\n\n print(\"Augmentation\")\n\n # if necessary create aug dir and make sure it's empty\n if not os.path.exists(config.aug_dir):\n os.makedirs(config.aug_dir)\n else:\n os.system('rm -rf %s/*' % config.aug_dir)\n\n # sort ids based on category\n split_categories = {0: [], 1: []}\n for id in dataset:\n split_categories[labels[id]].append(id)\n\n # calculate the amount of missing images to be augmented\n missing = {0: max(0, config.class_total - len(split_categories[0])), 1: max(0, config.class_total - len(split_categories[1]))}\n print(\" missing \" + config.class0 + \" data: \", missing[0])\n print(\" missing \" + config.class1 + \" data: \", missing[1])\n\n cnt = 0\n\n # loop over categories\n for cat in split_categories:\n\n # loop over missing repetitions of whole dataset\n for rep_idx in range(math.floor(missing[cat] / len(split_categories[cat]))):\n\n # loop over ids in dataset\n for id in split_categories[cat]:\n\n aug_name = \"aug\" + str(cnt) + \"_\" + id\n\n # update labels + dataset\n labels[aug_name] = cat\n dataset = np.append(dataset, aug_name)\n\n # augment image + save\n aug_image = mixing(id, split_categories[cat])\n np.save(config.aug_dir + aug_name + \".npy\", aug_image)\n\n cnt += 1\n\n # loop over rest of the missing images\n for rest_idx in range(missing[cat] % len(split_categories[cat])):\n\n id = split_categories[cat][rest_idx]\n aug_name = \"aug\" + str(cnt) + \"_\" + id\n\n # update labels + dataset\n labels[aug_name] = cat\n dataset = np.append(dataset, aug_name)\n\n # augment image + save\n aug_image = mixing(id, split_categories[cat])\n np.save(config.aug_dir + aug_name + \".npy\", aug_image)\n\n cnt += 1\n\n return dataset, labels", "def Gravity_BGG(self):\n for clump in self.clumps:\n dx = clump.x\n dy = clump.y\n dz = clump.z\n dr = np.sqrt(dx**2 + dy**2 + dz**2)\n m_inside = self.Mass_BGG(dr)\n a = G * m_inside / dr**2\n clump.ax += -a * dx / dr\n clump.ay += -a * dy / dr\n 
clump.az += -a * dz / dr", "def merged_mask(basins, ds, lon_name='lon', lat_name='lat', merge_dict = None, verbose=False):\n mask = basins.mask(ds,lon_name=lon_name, lat_name=lat_name)\n\n def find_mask_index(name):\n target_value = [ri for ri in range(len(basins.regions)) if basins.regions[ri].name == name]\n if len(target_value) > 1:\n warnings.warn(f\"Found more than one matching region for {name}\")\n return target_value[0]\n elif len(target_value) == 1:\n return target_value[0]\n else:\n return None\n \n \n if merge_dict is None:\n merge_dict = _default_merge_dict()\n \n dict_keys = list(merge_dict.keys())\n number_dict = {k:None for k in dict_keys}\n merged_basins = []\n for ocean, small_basins in merge_dict.items():\n# ocean_idx = find_mask_index(ocean)\n try:\n ocean_idx = basins.map_keys(ocean)\n except(KeyError):\n #The ocean key is new and cant be found in the previous keys (e.g. for Atlantic full or maritime continent)\n ocean_idx = mask.max().data + 1\n number_dict[ocean] = ocean_idx\n if small_basins:\n for sb in small_basins:\n sb_idx = basins.map_keys(sb)\n #set the index of each small basin to the ocean value\n mask = mask.where(mask!=sb_idx, ocean_idx)\n merged_basins.append(sb)\n \n if verbose:\n remaining_basins = [str(basins.regions[ri].name) for ri in range(len(basins.regions)) if (basins.regions[ri].name not in merged_basins) and (basins.regions[ri].name not in list(merge_dict.keys()))]\n print(remaining_basins)\n\n #reset the mask indicies to the order of the passed dictionary keys\n mask_reordered = xr.ones_like(mask.copy()) * np.nan\n for new_idx, k in enumerate(dict_keys):\n old_idx = number_dict[k]\n mask_reordered = mask_reordered.where(mask!=old_idx, new_idx)\n\n return mask_reordered", "def set(self, g_imp, mu):\n if not(self.filling is None):\n assert False, 'todo, filling feature not implented yet'\n #mu = self.find_and_set_mu(self.filling, se_lat, mu, self.dmu_max)\n if self.transf is not None:\n g_imp = self.transf.backtransform_g(g_imp)\n self.g_lat_initdict['g_r'] = self.calc_g_r(g_imp)\n self.g_lat = LatticeGreensfunction(**self.g_lat_initdict)\n self.g_lat.periodize()\n self.g_cluster = self.set_g_cluster(self.g_cluster, self.g_lat)\n for ri, rj in itt.product(*[self.r_cavity]*2):\n self.g_cavity[ri, rj] = self.g_lat[ri, rj].copy()\n for ra, rb in itt.product(*[self.r_cluster]*2):\n for s, b in self.g_cavity[ri, rj]:\n b -= self.g_lat[ri, ra][s] * self.g_lat.inverse_real_space_at(ra, rb)[\n s] * self.g_lat[rb, rj][s]\n self.lambd.zero()\n for ila, icl in self.lat_r_to_cluster.items():\n ra, rb = ila[0], ila[1]\n bo, bi, bj = ila[2], ila[3], ila[4]\n for ri, rj in itt.product(*[self.r_cavity]*2):\n self.lambd[icl[0]][icl[1], icl[2]] += self.hopping_lat[ra, ri][bo][bi, bj] * \\\n self.g_cavity[ri, rj][bo][bi, bj] * \\\n self.hopping_lat[rj, rb][bo][bi, bj]\n\n if self.transf is not None:\n self.lambda_imp_basis = self.transf.transform_g(self.lambd)\n self << self.transf.transform_g(self.g_cluster)\n else:\n self.lambda_imp_basis << self.lambd\n self << self.g_cluster\n return mu", "def lung_segmentation(patient_dir):\n\n \"\"\" LOAD THE IMAGE \"\"\"\n\n # Initialize image and get dcm files\n dcm_list = glob(patient_dir + '/*.dcm')\n img = np.zeros((len(dcm_list), 512, 512), dtype='float32')\n z = []\n\n # For each dcm file, get the corresponding slice, normalize HU values, and store the Z position of the slice\n for i, f in enumerate(dcm_list):\n dcm = dicom.read_file(f)\n img[i] = float(dcm.RescaleSlope) * dcm.pixel_array.astype('float32') + 
float(dcm.RescaleIntercept)\n z.append(dcm.ImagePositionPatient[-1])\n\n # Get spacing and reorder slices\n spacing = map(float, dcm.PixelSpacing) + [np.median(np.diff(np.sort(z)))]\n img = img[np.argsort(z)]\n\n \"\"\" NORMALIZE HU AND RESOLUTION \"\"\"\n\n # Clip and normalize\n img = np.clip(img, -1024, 4000)\n img = (img + 1024.) / (4000 + 1024.)\n\n # Rescale 1mm x 1mm x 1mm\n new_shape = map(lambda x, y: int(x * y), img.shape, spacing[::-1])\n img = resize(img, new_shape, preserve_range=True)\n\n \"\"\" SEGMENT LUNGS USING THRESHOLDING + MORPHOLOGY + SIMPLE RULES \"\"\"\n\n # Threshold the image\n middle = img.shape[0] / 2\n data = img[middle].flatten()\n data = data[data > 0][:, None]\n kmeans = KMeans(n_clusters=2).fit(data)\n threshold = np.mean(kmeans.cluster_centers_.flatten())\n thresh_img = np.where(img < threshold, 1.0, 0.0)\n thresh_img[img == 0.] = 0.\n\n # Clean the image\n thresh_img = morphology.binary_erosion(thresh_img, np.ones([3, 3, 3]))\n\n # Detect connexity\n labels = measure.label(thresh_img)\n regions = measure.regionprops(labels)\n good_labels = []\n\n regions = filter(lambda x: x.area > 500000, regions)\n\n for prop in regions:\n B = prop.bbox\n lim = img.shape[1] / 3\n area_center = np.sum((labels == prop.label)[:, lim:2 * lim, :])\n\n # Big enough area (1,2,3), not too close to the image border, and with most area in the center\n if B[5] - B[2] > 1 / 4. * img.shape[2] \\\n and B[3] - B[0] > 1 / 4. * img.shape[0] \\\n and np.sum(B[:3]) > 10 \\\n and area_center > 0.3 * prop.area:\n good_labels.append(prop.label)\n\n lungmask = np.sum([labels == i for i in good_labels], axis=0)\n\n # Get the entire lung with a big dilation (should use ball(15) but it's too slow)\n for i in range(6):\n lungmask = morphology.binary_dilation(lungmask, np.ones((5, 5, 5)))\n for i in range(4):\n lungmask = morphology.binary_erosion(lungmask, np.ones((5, 5, 5)))\n\n \"\"\" CENTER AND PAD TO GET SHAPE (384, 288, 384) \"\"\"\n\n # Center the image\n\n sum_x = np.sum(lungmask, axis=(0, 1))\n sum_y = np.sum(lungmask, axis=(0, 2))\n sum_z = np.sum(lungmask, axis=(1, 2))\n\n mx = np.nonzero(sum_x)[0][0]\n Mx = len(sum_x) - np.nonzero(sum_x[::-1])[0][0]\n my = np.nonzero(sum_y)[0][0]\n My = len(sum_y) - np.nonzero(sum_y[::-1])[0][0]\n mz = np.nonzero(sum_z)[0][0]\n Mz = len(sum_z) - np.nonzero(sum_z[::-1])[0][0]\n\n img = img * lungmask\n img = img[mz:Mz, my:My, mx:Mx]\n\n # Pad the image to (384, 288, 384)\n nz, nr, nc = img.shape\n\n pad1 = int((384 - nz) / 2)\n pad2 = 384 - nz - pad1\n pad3 = int((288 - nr) / 2)\n pad4 = 288 - nr - pad3\n pad5 = int((384 - nc) / 2)\n pad6 = 384 - nc - pad5\n\n # Crop images too big\n if pad1 < 0:\n img = img[:, -pad1:384 - pad2]\n pad1 = pad2 = 0\n if img.shape.shape[0] == 383:\n pad1 = 1\n\n if pad3 < 0:\n img = img[:, :, -pad3:288 - pad4]\n pad3 = pad4 = 0\n if img.shape.shape[1] == 287:\n pad3 = 1\n\n if pad5 < 0:\n img = img[:, :, -pad5:384 - pad6]\n pad5 = pad6 = 0\n if img.shape.shape[2] == 383:\n pad5 = 1\n\n # Pad\n img = np.pad(img, pad_width=((pad1 - 4, pad2 + 4), (pad3, pad4), (pad5, pad6)), mode='constant')\n # The -4 / +4 is here for \"historical\" reasons, but it can be removed\n\n return img" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Crop the microstructure to create a new one.
def crop(self, x_start, x_end, y_start, y_end, z_start, z_end): micro_crop = Microstructure() micro_crop.name = self.name + '_crop' print('cropping microstructure to %s' % micro_crop.name) micro_crop.grain_map = self.grain_map[x_start:x_end, y_start:y_end, z_start:z_end] if hasattr(self, 'mask'): micro_crop.mask = self.mask[x_start:x_end, y_start:y_end, z_start:z_end] grain_ids = np.unique(micro_crop.grain_map) for gid in grain_ids: if not gid > 0: continue micro_crop.grains.append(self.get_grain(gid)) print('%d grains in cropped microstructure' % len(micro_crop.grains)) return micro_crop
[ "def test_copy_part(self):\n framework = Framework(config_path=config_path)\n assembly = Framework.reader(framework.skeleton, settings=SETTINGS)\n new_fw = assembly.fw.copy_part()\n new_assembly = assembly.copy_part()\n\n assert id(assembly) != id(new_assembly)\n assert id(new_fw) != id(assembly.fw)", "def crop(self, length, step, drop_last=False, fill_value=0):\n length = parse_depth(length, check_positive=True, var_name=\"length\")\n if drop_last:\n self._check_segment_lengths(length)\n wells = self.iter_level(-2)\n for well in wells:\n well.segments = [\n Well(segments=segment.crop(length, step, drop_last, fill_value))\n for segment in well\n ]\n return self", "def unfilled_copy(self):\n copy = Region(self.image, target=self.target, seed_vox=self.pos_to_vox(self.seed_pos))\n copy.bias_against_merge = self.bias_against_merge\n copy.move_based_on_new_mask = self.move_based_on_new_mask\n\n return copy", "def clear_microscope_crop(self):\n self.config['camera_microscope'].stop_camera()\n self.config['camera_microscope'].clear_ROI()\n self.config['camera_microscope'].start_free_run()", "def clean(shapefile):\n\tshapefile['geometry'] = shapefile.buffer(0)\n\treturn shapefile", "def trim_crop_image(original_img, trim_size):\n nudimx = original_img.width - 2*trim_size \n nudimy = original_img.height - 2*trim_size\n new = SimpleImage.blank(nudimx, nudimy)\n for y in range(new.height):\n for x in range(new.width):\n newx = x + trim_size-1\n newy = y + trim_size-1\n new.set_pixel(x, y, original_img.get_pixel(newx, newy))\n return new", "def copy(self):\n return ReadStructure(self.structure)", "def crop(node):\n if node.k <= 3 or not is_padded(node):\n return node\n else:\n return crop(inner(node))", "def cropImage():", "def crop(self, bb, zeropad=True):\n assert isinstance(bb, vipy.geometry.BoundingBox), \"Invalid input\"\n bb = bb.int()\n bbc = bb.clone().imclipshape(self.width(), self.height()).int()\n #if zeropad and bb != bbc:\n # self.zeropad(bb.width()-bbc.width(), bb.height()-bbc.height()) \n # bb = bb.offset(bb.width()-bbc.width(), bb.height()-bbc.height()) \n super().crop(bb, zeropad=zeropad) # range check handled here to correctly apply zeropad\n bb = bb if zeropad else bbc\n self._tracks = {k:t.offset(dx=-bb.xmin(), dy=-bb.ymin()) for (k,t) in self.tracks().items()}\n return self", "def replicate_morphology(self):\n\n my_morphology = MorphologyFST(parent_directory=self.directory)\n\n if not self.morphology.rich_upper:\n dictionary_path = self.morphology.get_file_path('dictionary')\n if os.path.isfile(dictionary_path):\n replicated_dictionary_path = my_morphology.get_file_path('dictionary')\n self.copy_file(dictionary_path, replicated_dictionary_path)\n\n script_path = self.morphology.get_file_path('script')\n if os.path.isfile(script_path):\n replicated_script_path = my_morphology.get_file_path('script')\n self.copy_file(script_path, replicated_script_path)", "def detach_iso(self):\n raise NotImplementedError", "def _prep_frame(self, frame):\n if frame is None:\n return frame\n if len(frame.shape) < 3:\n frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)\n frame = cv2.resize(frame, self.shape, fx=self.fx, fy=self.fy)\n return frame", "def copy(self):\r\n U = CatMorphism(self.name,self.source,self.target)\r\n U.set_mapping_matrix(self.get_mapping_matrix())\r\n\r\n return U", "def _restore(self, backup_copy):\n self.spec = copy.deepcopy(backup_copy)\n self.pattern = self.spec['pattern']\n self.properties = self.spec['properties'] # mandatory part", "def clean(self):\n 
self.delete_invalid_geometries(\n query_small_area=lambda feat: \"_part\" not in feat[\"localId\"]\n )\n self.topology()\n self.merge_building_parts()\n self.simplify()\n self.delete_small_geometries()", "def copy(self, parent):\n out = Block(self.type)\n out.pins = dict((k, v.copy(out)) for k, v in self.pins.items())\n out.mirrored = self.mirrored\n out.rotation = self.rotation\n\n out.name = self.name\n out.groups = self.groups\n\n out.size = self.size\n out.field = parent or self\n\n return out", "def test_deepcopied(self):\n ############################################################\n # Test if the MayaVi2 visualization can be deep-copied.\n\n # Pop the source object.\n s = self.scene\n source = s.children.pop()\n # Add it back to see if that works without error.\n s.children.append(source) \n cp = source.children[0].children[-1]\n s = self.scene\n\n self.check()\n\n # Now deepcopy the source and replace the existing one with\n # the copy. This basically simulates cutting/copying the\n # object from the UI via the right-click menu on the tree\n # view, and pasting the copy back.\n source1 = copy.deepcopy(source)\n s.children[0] = source1\n s = self.scene\n self.check()\n #from enthought.mayavi.tools.show import show\n #show()", "def deepCopy(self):\n # 1) new part\n part = self.newPart()\n for key, vhelix in self._virtual_helices:\n # 2) Copy VirtualHelix\n part._virtual_helices[key] = vhelix.deepCopy(part)\n # end for\n # 3) Copy oligos\n for oligo, val in self._oligos:\n strandGenerator = oligo.strand5p().generator3pStrand()\n strand_type = oligo.strand5p().strandType()\n new_oligo = oligo.deepCopy(part)\n last_strand = None\n for strand in strandGenerator:\n id_num = strand.virtualHelix().number()\n newVHelix = part._virtual_helices[id_num]\n new_strandset = newVHelix().getStrandSetByType(strand_type)\n new_strand = strand.deepCopy(new_strandset, new_oligo)\n if last_strand:\n last_strand.setConnection3p(new_strand)\n else:\n # set the first condition\n new_oligo.setStrand5p(new_strand)\n new_strand.setConnection5p(last_strand)\n new_strandset.addStrand(new_strand)\n last_strand = new_strand\n # end for\n # check loop condition\n if oligo.isLoop():\n s5p = new_oligo.strand5p()\n last_strand.set3pconnection(s5p)\n s5p.set5pconnection(last_strand)\n # add to part\n oligo.add()\n # end for\n return part" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the center of mass of a grain given its id.
def compute_grain_center(self, gid): # isolate the grain within the complete grain map slices = ndimage.find_objects(self.grain_map == gid) if not len(slices) > 0: raise ValueError('warning grain %d not found in grain map' % gid) sl = slices[0] offset = np.array([sl[0].start, sl[1].start, sl[2].start]) grain_data_bin = (self.grain_map[sl] == gid).astype(np.uint8) local_com = ndimage.measurements.center_of_mass(grain_data_bin) com = self.voxel_size * (offset + local_com - 0.5 * np.array(self.grain_map.shape)) return com
[ "def get_center_of_mass(self):\n atomos = self.get_coords()\n M = self.mol_weight\n centro = Matrix([[0, 0, 0]])\n for a in atomos:\n centro = centro + (Matrix([[a[1], a[2], a[3]]]) * PERIODIC_TABLE[a[0]][\"mass\"])\n centro *= (1 / M)\n return centro", "def recompute_grain_centers(self, verbose=False):\n if not hasattr(self, 'grain_map'):\n print('warning: need a grain map to recompute the center of mass of the grains')\n return\n for g in self.grains:\n try:\n com = self.compute_grain_center(g.id)\n except ValueError:\n print('skipping grain %d' % g.id)\n continue\n if verbose:\n print('grain %d center: %.3f, %.3f, %.3f' % (g.id, com[0], com[1], com[2]))\n g.center = com", "def center_of_mass(data: sc.DataArray) -> sc.Variable:\n summed = data.sum(list(set(data.dims) - set(data.meta['position'].dims)))\n v = sc.values(summed.data)\n return sc.sum(summed.meta['position'] * v) / v.sum()", "def center(atoms):\n base_atoms = {'N1', 'C2', 'N3', 'C4', 'C5','C6','O2', 'O4', 'N4'} # pyrimidine atoms \n base_atoms.update({'N2', 'O6', 'N6', 'N7','C8', 'N9'}) # purine specific ones \n cx,cy,cz = 0,0,0\n cpt =0\n for a in atoms : \n if(a.atom_label in base_atoms):\n cx += float(a.x)\n cy += float(a.y)\n cz += float(a.z)\n cpt+=1\n if(cpt >0):\n return (cx/cpt, cy/cpt, cz/cpt)\n else:\n return (0,0,0)", "def getCenterOfMass(self, atoms=[]):\n\n atoms = np.array(atoms)\n center = np.zeros(3, np.float64); mass = 0.0\n if len(atoms) == 0:\n atoms = np.arange(0, len(self.atoms), 1)\n else:\n atoms = atoms - 1\n\n for i in atoms:\n center += self.atoms[i].realMass * self.atoms[i].coord\n mass += self.atoms[i].realMass\n center /= mass\n\n return center", "def center_of_mass(self):\n return self._center_of_mass", "def pickCenter(self, id_, distance, fromPosition=None):\n\n # find max distance position(s)\n if fromPosition is None:\n\n # in the whole segment\n max_dist = scipy.ndimage.maximum(\n distance, labels=self.data, index=id_)\n max_indices = numpy.nonzero((distance==max_dist) & (self.data==id_))\n\n else:\n \n # find neighborhood of fromPosition on the id_\n se = scipy.ndimage.generate_binary_structure(self.ndim, self.ndim)\n hood = numpy.zeros(shape=self.data.shape, dtype=bool)\n hood[tuple(fromPosition)] = True\n hood = scipy.ndimage.binary_dilation(hood, structure=se)\n hood[self.data != id_] = False\n if not hood.any():\n return None\n\n # max distance(s) in the hood\n #center_pos = scipy.ndimage.maximum_position(distance, labels=hood)\n max_dist = scipy.ndimage.maximum(distance, labels=hood)\n max_indices = numpy.nonzero((distance==max_dist) & hood)\n\n # extract one max distance position\n if len(max_indices[0]) > 1:\n\n # more than one max, pick the closest to the cm\n max_indices = numpy.asarray(max_indices)\n cm = scipy.ndimage.center_of_mass(self.data==id_)\n cm = numpy.expand_dims(numpy.asarray(cm), axis=-1)\n sq_diff = numpy.square(max_indices - cm)\n cm_dist_sq = numpy.add.reduce(sq_diff, axis=0)\n center_index = scipy.ndimage.minimum_position(cm_dist_sq)[0]\n center_pos = max_indices[:, center_index]\n\n else:\n center_pos = numpy.array([x[0] for x in max_indices])\n\n return numpy.asarray(center_pos)", "def __center_of_mass(self):\n #print self.xyzs, self.__Natoms\n self.com=_np.array([0.0,0.0,0.0])\n self.centroid=_np.array([0.0,0.0,0.0])\n if len(self.xyzs)==0:\n return\n total_mass=0.0\n self.centroid=sum(self.xyzs)/len(self.xyzs)\n wts=[constants.dict_of_atomic_masses[self.list_of_atoms[i][0].replace(\"@\",\"\")] for i in xrange(self.__Natoms)]\n for i,atom in 
enumerate(self.xyzs):\n wt=wts[i]\n total_mass=total_mass+wt\n self.com=self.com+atom*wt\n self.centroid=_np.array([i/self.__Natoms for i in self.centroid])\n self.com=_np.array([i/total_mass for i in self.com])", "def centerOfMass(self):\n cmf = vtk.vtkCenterOfMass()\n cmf.SetInputData(self.polydata(True))\n cmf.Update()\n c = cmf.GetCenter()\n return np.array(c)", "def get_center(self):\n center = np.mean(self.helix_axis_coords, axis=0) \n return center", "def find_center(self):\n r = self.cluster.r_lambda * np.sqrt(np.random.random(size=1))\n phi = 2. * np.pi * np.random.random(size=1)\n\n x = r * np.cos(phi) / (self.cluster.mpc_scale)\n y = r * np.sin(phi) / (self.cluster.mpc_scale)\n\n ra_cen = self.cluster.ra + x / np.cos(np.radians(self.cluster.dec))\n dec_cen = self.cluster.dec + y\n\n self.ra[0] = ra_cen\n self.dec[0] = dec_cen\n self.ngood = 1\n self.index[0] = -1\n self.maxind = -1\n self.p_cen[0] = 1.0\n self.q_cen[0] = 1.0\n self.p_sat[0] = 0.0\n self.p_fg[0] = 0.0\n self.p_c[0] = 1.0\n\n return True", "def get_center(self):\n\t\thx = self.h[0]\n\t\thy = self.h[1]\n\t\thz = self.h[2]\n\n\t\treturn sum([self.xyz0, [hx/2, hy/2, hz/2]], axis=0)", "def geometric_center(self):\n geometric_center = np.array([0.0, 0.0, 0.0])\n for atom in self.atoms:\n geometric_center += atom.position\n geometric_center /= len(self.atoms)\n return geometric_center", "def get_center(self):\n ra, dec = sphericalFromCartesian(self.bounding_circle[0])\n return np.degrees(ra), np.degrees(dec)", "def center_of_mass(snap: SnapLike) -> ndarray:\n mass: ndarray = snap['mass']\n pos: ndarray = snap['position']\n return (mass[:, np.newaxis] * pos).sum(axis=0)", "def center_of_mass(particles):\n mtot = total_mass(particles)\n\n com = Vector([0,0,0])\n for p in particles:\n com += p.m*p.r/mtot\n\n return com", "def getCenterOfMass(self):\n if not hasattr(self, 'label_clean'):\n self.cleanSample()\n\n self.cms = ndimage.center_of_mass(self.image,\n labels=self.label_clean,\n index=np.unique(self.label_clean))\n self.xcms = [c[1] for c in self.cms]\n self.ycms = [c[0] for c in self.cms]\n\n print 'After cleaning found {0:d} objects'.format(len(self.xcms))\n\n return self.xcms, self.ycms, self.cms", "def get_center_of_mass(xyz):\n masses = get_element_mass_from_xyz(xyz)\n cm_x, cm_y, cm_z = 0, 0, 0\n for coord, mass in zip(xyz['coords'], masses):\n cm_x += coord[0] * mass\n cm_y += coord[1] * mass\n cm_z += coord[2] * mass\n cm_x /= sum(masses)\n cm_y /= sum(masses)\n cm_z /= sum(masses)\n return float(cm_x), float(cm_y), float(cm_z)", "def getCenter(self) -> \"SbVec3f const &\":\n return _coin.SbSphere_getCenter(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute and assign the centers of all grains in the microstructure using the grain map. Each grain center is computed from its center of mass and assigned to the grain.center attribute. If the voxel size is specified, the grain centers are expressed in mm; otherwise they are in voxel units.
def recompute_grain_centers(self, verbose=False): if not hasattr(self, 'grain_map'): print('warning: need a grain map to recompute the center of mass of the grains') return for g in self.grains: try: com = self.compute_grain_center(g.id) except ValueError: print('skipping grain %d' % g.id) continue if verbose: print('grain %d center: %.3f, %.3f, %.3f' % (g.id, com[0], com[1], com[2])) g.center = com
[ "def compute_grain_center(self, gid):\n # isolate the grain within the complete grain map\n slices = ndimage.find_objects(self.grain_map == gid)\n if not len(slices) > 0:\n raise ValueError('warning grain %d not found in grain map' % gid)\n sl = slices[0]\n offset = np.array([sl[0].start, sl[1].start, sl[2].start])\n grain_data_bin = (self.grain_map[sl] == gid).astype(np.uint8)\n local_com = ndimage.measurements.center_of_mass(grain_data_bin)\n com = self.voxel_size * (offset + local_com - 0.5 * np.array(self.grain_map.shape))\n return com", "def set_grain_map(self, grain_map, voxel_size):\n self.grain_map = grain_map\n self.voxel_size = voxel_size", "def geometric_center(self):\n geometric_center = np.array([0.0, 0.0, 0.0])\n for atom in self.atoms:\n geometric_center += atom.position\n geometric_center /= len(self.atoms)\n return geometric_center", "def compute_centers(self):\n for cluster_ in range(self.number_clusters): # type: ignore\n center = np.mean(self.data[self.model.labels_ == cluster_], axis=0) # type: ignore\n if center.isnull().values.any(): # type: ignore\n self.centers[cluster_] = center.fillna(0) # type: ignore\n else:\n self.centers[cluster_] = center", "def get_center_of_mass(self):\n atomos = self.get_coords()\n M = self.mol_weight\n centro = Matrix([[0, 0, 0]])\n for a in atomos:\n centro = centro + (Matrix([[a[1], a[2], a[3]]]) * PERIODIC_TABLE[a[0]][\"mass\"])\n centro *= (1 / M)\n return centro", "def center(self):\n for atoms in self:\n atoms.center()", "def find_center(self):\n r = self.cluster.r_lambda * np.sqrt(np.random.random(size=1))\n phi = 2. * np.pi * np.random.random(size=1)\n\n x = r * np.cos(phi) / (self.cluster.mpc_scale)\n y = r * np.sin(phi) / (self.cluster.mpc_scale)\n\n ra_cen = self.cluster.ra + x / np.cos(np.radians(self.cluster.dec))\n dec_cen = self.cluster.dec + y\n\n self.ra[0] = ra_cen\n self.dec[0] = dec_cen\n self.ngood = 1\n self.index[0] = -1\n self.maxind = -1\n self.p_cen[0] = 1.0\n self.q_cen[0] = 1.0\n self.p_sat[0] = 0.0\n self.p_fg[0] = 0.0\n self.p_c[0] = 1.0\n\n return True", "def center_of_mass(self, colony_num=0):\n\n try:\n self.colonies\n except AttributeError:\n print(\"Run `ColonyROIImportCSV.colony_data` first to import data.\")\n\n N = self.get_colony_count()\n if colony_num not in range(N):\n raise ValueError(\"Pick a colony number from 0 to \" + str(N))\n\n x_ = 0\n y_ = 0\n\n colony_roi = self.colonies[colony_num]\n roi_data = colony_roi['roi_data']\n roi_pixel_count = len(roi_data)\n\n for coord in roi_data:\n x_ = x_ + coord[0]\n y_ = y_ + coord[1]\n\n x_ = x_/roi_pixel_count\n y_ = y_/roi_pixel_count\n\n return x_, y_", "def __center_of_mass(self):\n #print self.xyzs, self.__Natoms\n self.com=_np.array([0.0,0.0,0.0])\n self.centroid=_np.array([0.0,0.0,0.0])\n if len(self.xyzs)==0:\n return\n total_mass=0.0\n self.centroid=sum(self.xyzs)/len(self.xyzs)\n wts=[constants.dict_of_atomic_masses[self.list_of_atoms[i][0].replace(\"@\",\"\")] for i in xrange(self.__Natoms)]\n for i,atom in enumerate(self.xyzs):\n wt=wts[i]\n total_mass=total_mass+wt\n self.com=self.com+atom*wt\n self.centroid=_np.array([i/self.__Natoms for i in self.centroid])\n self.com=_np.array([i/total_mass for i in self.com])", "def recalculate_center(self):\n # if we don't have any assigned inputs after this K-Means epoch, leave\n # the center where it was\n if self.assigned_inputs:\n new_center = []\n for dimension in xrange(len(self.assigned_inputs[0])):\n total = reduce(operator.add,\n [x[dimension] for x in self.assigned_inputs])\n 
new_center.append(float(total) / len(self.assigned_inputs))\n self.center = new_center", "def calculate_center(self, geoJson):\n lonmin, lonmax, latmin, latmax = 99, 0, 99, 0\n for point in geoJson['coordinates'][0]:\n lonmin = min(point[0], lonmin)\n latmin = min(point[1], latmin)\n lonmax = max(point[0], lonmax)\n latmax = max(point[1], latmax)\n\n center = [lonmin + (lonmax - lonmin)/2 , latmin + (latmax - latmin)/2]\n return center\n #df_open_airblocks.iloc[0]['simpleJSON']['coordinates'][0]:", "def center(atoms):\n base_atoms = {'N1', 'C2', 'N3', 'C4', 'C5','C6','O2', 'O4', 'N4'} # pyrimidine atoms \n base_atoms.update({'N2', 'O6', 'N6', 'N7','C8', 'N9'}) # purine specific ones \n cx,cy,cz = 0,0,0\n cpt =0\n for a in atoms : \n if(a.atom_label in base_atoms):\n cx += float(a.x)\n cy += float(a.y)\n cz += float(a.z)\n cpt+=1\n if(cpt >0):\n return (cx/cpt, cy/cpt, cz/cpt)\n else:\n return (0,0,0)", "def center_of_mass(self):\n return self._center_of_mass", "def compute_position_center_of_mass(self):\n return self.position_collection[..., 0].copy()", "def compute_centers(self):\n for img in self.images:\n for i in self.images_superpixels[img]:\n # Retrieve all indices where superpixel label equals i\n indices = np.where(self.images_segmented[img] == i)\n # Approximate the center by the medians of the indices in x and y dimension\n self.images_superpixels_center[img].append((np.median(indices[1]), np.median(indices[0])))", "def get_center(self):\n center = np.mean(self.helix_axis_coords, axis=0) \n return center", "def centre(self, cluster):\r\n size = len(cluster) * 1.0\r\n cen = np.zeros_like(self.data[0])\r\n for item in cluster:\r\n cen = cen + self.data[item]\r\n return cen / size", "def find_centre(\n self,\n xstart=0.0,\n ystart=0.0,\n zstart=0.0,\n vxstart=0.0,\n vystart=0.0,\n vzstart=0.0,\n indx=None,\n nsigma=1.0,\n nsphere=100,\n density=True,\n rmin=0.1,\n rmax=None,\n nmax=100,\n method='harfst',\n nneighbour=6,\n reset_centre=False\n ):\n\n xc,yc,zc,vxc,vyc,vzc=find_centre(self,xstart=xstart,\n ystart=ystart,zstart=zstart,vxstart=vxstart,vystart=vystart,vzstart=vzstart,indx=indx,\n nsigma=nsigma,nsphere=nsphere,density=density,\n rmin=rmin,rmax=rmax,nmax=nmax,method=method,nneighbour=nneighbour)\n\n self._set_centre(xc,yc,zc,vxc,vyc,vzc,reset_centre=reset_centre)\n\n return xc,yc,zc,vxc,vyc,vzc", "def center(self, index=None):\n \n if index == None:\n center = 0.5*(self.grid[1:] + self.grid[:-1])\n else:\n center = 0.5*(self.grid[index + 1] + self.grid[index])\n \n return center" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write orientation data to ASCII files to prepare for an FFT computation. AMITEX_FFTP can be used to compute the elastoplastic response of polycrystalline microstructures. The calculation needs the orientation data of each grain written as the coordinates of the first two basis vectors expressed in the crystal local frame, which correspond to the first two columns of the orientation matrix. The values are written to 6 files N1X.txt, N1Y.txt, N1Z.txt, N2X.txt, N2Y.txt, N2Z.txt, each containing n values, where n is the number of grains. The data is written in either binary or ASCII form.
def to_amitex_fftp(self, binary=True): ext = 'bin' if binary else 'txt' n1x = open('N1X.%s' % ext, 'w') n1y = open('N1Y.%s' % ext, 'w') n1z = open('N1Z.%s' % ext, 'w') n2x = open('N2X.%s' % ext, 'w') n2y = open('N2Y.%s' % ext, 'w') n2z = open('N2Z.%s' % ext, 'w') files = [n1x, n1y, n1z, n2x, n2y, n2z] if binary: import struct for f in files: f.write('%d \ndouble \n' % self.get_number_of_grains()) f.close() n1x = open('N1X.%s' % ext, 'ab') n1y = open('N1Y.%s' % ext, 'ab') n1z = open('N1Z.%s' % ext, 'ab') n2x = open('N2X.%s' % ext, 'ab') n2y = open('N2Y.%s' % ext, 'ab') n2z = open('N2Z.%s' % ext, 'ab') for g in self.grains: gt = g.orientation_matrix().T n1 = gt[0] n2 = gt[1] n1x.write(struct.pack('>d', n1[0])) n1y.write(struct.pack('>d', n1[1])) n1z.write(struct.pack('>d', n1[2])) n2x.write(struct.pack('>d', n2[0])) n2y.write(struct.pack('>d', n2[1])) n2z.write(struct.pack('>d', n2[2])) else: for g in self.grains: gt = g.orientation_matrix().T n1 = gt[0] n2 = gt[1] n1x.write('%f\n' % n1[0]) n1y.write('%f\n' % n1[1]) n1z.write('%f\n' % n1[2]) n2x.write('%f\n' % n2[0]) n2y.write('%f\n' % n2[1]) n2z.write('%f\n' % n2[2]) n1x.close() n1y.close() n1z.close() n2x.close() n2y.close() n2z.close() print('orientation data written for AMITEX_FFTP') # if possible, write the vtk file to run the computation if hasattr(self, 'grain_map') and self.grain_map is not None: # convert the grain map to vtk file from vtk.util import numpy_support vtk_data_array = numpy_support.numpy_to_vtk(np.ravel(self.grain_map, order='F'), deep=1) vtk_data_array.SetName('GrainIds') grid = vtk.vtkImageData() size = self.grain_map.shape grid.SetExtent(0, size[0], 0, size[1], 0, size[2]) grid.GetCellData().SetScalars(vtk_data_array) grid.SetSpacing(self.voxel_size, self.voxel_size, self.voxel_size) writer = vtk.vtkStructuredPointsWriter() writer.SetFileName('%s_pymicro.vtk' % self.name) if binary: writer.SetFileTypeToBinary() writer.SetInputData(grid) writer.Write() print('grain map written in legacy vtk form for AMITEX_FFTP')
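As a complement to the method above, here is a minimal sketch of the ASCII variant of the layout described in the query: six column files with one value per grain, taken from the first two rows of each transposed orientation matrix. The list of orientation matrices is placeholder data.

import numpy as np

orientation_matrices = [np.eye(3) for _ in range(4)]  # stand-in for one 3x3 matrix per grain
names = ('N1X', 'N1Y', 'N1Z', 'N2X', 'N2Y', 'N2Z')
columns = {name: [] for name in names}
for g in orientation_matrices:
    gt = g.T
    for name, value in zip(names, (*gt[0], *gt[1])):  # n1 components then n2 components
        columns[name].append(value)
for name in names:
    np.savetxt('%s.txt' % name, columns[name], fmt='%f')  # n lines, one grain per line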
[ "def make_ascii_(self, metadata=[], data=[], output_file_name=''):\n with open(output_file_name, 'w') as f:\n for _meta in metadata:\n _line = _meta + \"\\n\"\n f.write(_line)\n for _data in data:\n _line = str(_data) + '\\n'\n f.write(_line)", "def write_data(natoms, lx,ly, tpe, molid, x, y, bid, btpe, b1, b2, aid, atpe, a1, a2, a3,ofname):\n ofile = open(ofname, 'w')\n ### write down header information\n ofile.write('LAMMPS data file filaments in 2D\\n\\n')\n ofile.write(str(natoms) + ' atoms\\n')\n ofile.write('1 atom types\\n')\n ofile.write(str(max(bid)) + ' bonds\\n')\n ofile.write('1 bond types\\n')\n ofile.write(str(max(aid)) + ' angles\\n')\n ofile.write('1 angle types\\n\\n')\n ofile.write('0.0 ' + str(lx) + ' xlo xhi\\n')\n ofile.write('0.0 ' + str(ly) + ' ylo yhi\\n')\n ofile.write('-2.5 2.5 zlo zhi\\n\\n')\n ofile.write('Masses\\n\\n')\n ofile.write('1 1\\n\\n')\n ### Atoms section\n ofile.write('Atoms\\n\\n')\n for i in range(natoms):\n ofile.write(str(i+1) + ' ' + str(molid[i]) + ' ' + str(tpe[i]) + ' ' + str(x[i]) + ' ' + str(y[i]) + ' 0.0\\n')\n ofile.write('\\n')\n ### Bonds section\n ofile.write('Bonds\\n\\n')\n for i in range(len(bid)):\n ofile.write(str(bid[i]) + ' ' + str(btpe[i]) + ' ' + str(b1[i]) + ' ' + str(b2[i]) + '\\n')\n ofile.write('\\n')\n ### Angles section\n ofile.write('Angles\\n\\n')\n for i in range(len(aid)):\n ofile.write(str(aid[i]) + ' ' + str(atpe[i]) + ' ' + str(a1[i]) + ' ' + str(a2[i]) + ' ' + str(a3[i]) + '\\n')\n ofile.write('\\n')\n ofile.close()\n return", "def Output2File(data_array, filebase, format, hdr=None, shape=None):\n # used by 'AIDA_Functions.py'\n \n # below is old\n #if shape is None:\n #\n # shape = data_array.shape\n \n ### EHom (20130625): adding line to shape data_array according to shape input parameter\n ### Should have been here before\n if (shape != None):\n data_array.shape = shape\n \n import matplotlib.pyplot as plt\n #plt.figure()\n #plt.imshow(data_array)\n #plt.title(data_array[0,0])\n #plt.show()\n \n if format == 'm':\n\n Mrc.save(data_array, filebase + '.mrc', ifExists=\"overwrite\")\n \n # below is old way - Mrc.bindArr no longer exists in Priithon\n #rs = ''\n #\n #for i in shape:\n # \n # rs += '%d ' %i\n #\n #dtype = data_array.dtype\n #\n #temp = Mrc.bindArr(filebase + '.mrc', data_array.astype(np.float32))\n ## can only write out as single precision\n #fileheader = temp.Mrc.hdrArray[0]\n #fileheader.setfield('NumTitles',1)\n #fileheader.field('title')[0] = 'Shape: ' + rs\n #temp.Mrc.close()\n ## STILL NEED TO PROVIDE A WAY OF SETTING HEADER INFO FROM INPUT\n \n elif format == 'f':\n\n if os.path.exists(filebase + '.fits') == 1:\n\n os.remove(filebase + '.fits')\n\n # Clement: using astropy.io.fits now\n \n fits_file = iofits.HDUList()\n datahdu = PrimaryHDU()\n datahdu.data = data_array\n \n \n iofits.append(filebase + '.fits',data_array,header=hdr)\n \n elif format == 't':\n if os.path.exists(filebase + '.tiff') == 1:\n\n os.remove(filebase + '.tiff')\n \n img = scipy.misc.toimage(data_array)\n img.save(filebase + '.tiff')\n \n elif format == 't2':\n if os.path.exists(filebase + '.tif') == 1:\n\n os.remove(filebase + '.tif')\n \n img = scipy.misc.toimage(data_array)\n img.save(filebase + '.tif')\n \n# Clement: Old version using pyfits (deprecated)\n# fits_file = pyfits.HDUList()\n# datahdu = pyfits.PrimaryHDU()\n# datahdu.data = data_array\n# \n# ## STILL NEED TO PROVIDE A WAY OF SETTING HEADER INFO FROM INPUT\n# #if type(hdr) is not types.NoneType:\n# #\n# # datahdu.header = hdr\n# # \n# # print hdr\n# 
\n# # Provide header info from the original fits file.\n# \n# \n# fits_file.append(datahdu)\n# fits_file.writeto(filebase + '.fits')\n \n# else: # format must be .tiff\n# \n# #!!!! TENTATIVE !!!!\n# # make sure orientation of TIFF file matches convention\n# if len(data_array.shape) == 2:\n# \n# U.saveImg(data_array[...,::-1,...], filebase + \".tiff\")\n# elif len(data_array.shape) == 3:\n# \n# U.saveImg_seq(data_array[...,::-1,...], filebase + \".tiff\")\n# else:\n# \n# message = \"\\n'data_array' shape is not 2 or 3! Cannot write \" + \\\n# \"out TIFF file!\"\n# raise ValueError, message\n\n ### EHom (20130616): also output results (if 2D) as an 8-bit JPEG files using PIL\n ### In the division of 255, I hack the addition of a small value to avoid \n ### a divide by zero in a true_divide call\n if len(data_array.shape) == 2:\n\n min = data_array.min()\n max = data_array.max()\n #print data_array.min()\n #print data_array.max()\n #print data_array.mean()\n rescaled = np.where(data_array > min, data_array-min, 0.)\n if ((max - min) == 0):\n message = \"\\nMax Min problem in outputting array! Cannot write JPEG file\\n\"\n print(message)\n else:\n rescaled *= (255.0 / (max - min))\n # Clement: we don't need to save the jpeg\n # im = ImageOps.flip(Image.fromarray(rescaled.astype(np.uint8)))\n # rescale and flip vertically to properly register image with FITS output\n # im.save(filebase + '.jpeg')", "def write_flatsurf_file(input_file_name, output_input_file_name, phase1, phase2, T, IFT, IFT_write_length, phase_types, max_depth):\n max_depth_str = \"\"\n if phase_types[0] == \"S\" or phase_types[1] == \"S\":# or phase_types[0] == \"G\" or phase_types[1] == \"G\":\n max_depth_str = \"maxdepth={} \".format(max_depth)\n \n with open(input_file_name+\".inp\", \"r\") as file: # Read the inital input file\n lines = file.readlines()\n # Create output_input_file_name.inp file and write all lines except the last from initial file and write new last line\n with open(output_input_file_name+\".inp\", \"w\") as output: \n output.writelines(lines[0])\n output.writelines(max_depth_str+\" \"+lines[1])\n output.writelines(lines[2:-1]) # All lines except the last\n # Last line \n (output.write(\"tk={0} FLATSURF xf1={{{1}}} xf2={{{2}}} IGNORE_CHARGE IFT={3:.{4}f} \\n\".\n format(T, \" \".join(map(str,phase1)), \" \".join(map(str,phase2)), IFT, IFT_write_length)))\n return", "def write_xyz(x, y, nsteps, natoms, filename):\n \n print \"WRITING TO XYZ\"\n \n ### open files for writing\n \n f = open(filename, 'w')\n \n ### write to file\n \n for step in range(nsteps):\n \n f.write(str(natoms) + '\\n\\n')\n \n for j in range(natoms):\n \n f.write('S\\t' + str(x[step][j]) + '\\t' + str(y[step][j]) + '\\t 0\\n')\n \n f.close()\n \n return", "def LSIWriter(filename, pixels_per_column, pixels_per_row, channels, \n numeric_type_indicator, apodization_type, \n remap_type, image_plane_indicator, rf_center_frequency, \n rf_bandwidth, dwell_angle, cone_angle, graze_angle, twist_angle, \n column_sample_spacing, row_sample_spacing, \n column_oversampling_factor, row_oversampling_factor, \n column_resolution, row_resolution,\n text_header, data): \n \n file = open(filename, 'wb')\n \n # Write Int32 Header Values\n file.write(np.int32(pixels_per_column))\n file.write(np.int32(pixels_per_row))\n file.write(np.int32(channels))\n file.write(np.int32(numeric_type_indicator))\n file.write(np.int32(apodization_type))\n file.write(np.int32(remap_type))\n file.write(np.int32(image_plane_indicator)) \n \n # Write Float32 Header Values\n 
file.write(np.float32(rf_center_frequency))\n file.write(np.float32(rf_bandwidth))\n file.write(np.float32(dwell_angle))\n file.write(np.float32(cone_angle))\n file.write(np.float32(graze_angle))\n file.write(np.float32(twist_angle))\n file.write(np.float32(column_sample_spacing)) \n file.write(np.float32(row_sample_spacing))\n file.write(np.float32(column_oversampling_factor))\n file.write(np.float32(row_oversampling_factor))\n file.write(np.float32(column_resolution))\n file.write(np.float32(row_resolution))\n \n file.write(bytes('\\0' * (200-file.tell()),'utf-8'))\n\n # Exactly 200 characters \n file.write(bytes(text_header[:200].ljust(200), 'utf-8'))\n \n if numeric_type_indicator == 1:\n file.write(np.float32(data))\n elif numeric_type_indicator == 2:\n file.write(np.complex64(data))\n else:\n err = 'Invalid \"numeric_type_indicator\". Valid range is 1 or 2'\n ValueError(err)\n \n file.close()", "def pkl_data_to_txt(self):\n for i in range(len(self.pkl_to_convert[0])):\n folder = os.path.dirname(self.pkl_to_convert[0][i])\n filename_ext = os.path.basename(self.pkl_to_convert[0][i])\n filename = os.path.splitext(filename_ext)[0] #get filename without extension\n pkl_file = pickle.load(open(self.pkl_to_convert[0][i], 'rb'))\n \n txt_file = np.zeros(shape=(2048,pkl_file['Intensities'].shape[0] + 1))\n \n data_array = pkl_file['Intensities']\n data_array = np.transpose(data_array)\n wavelength = pkl_file['Wavelengths']\n \n txt_file[:,0] = wavelength\n \n for i in range(pkl_file['Intensities'].shape[0]):\n txt_file[:,i+1] = data_array[:,i]\n \n np.savetxt(folder +\"/\"+ filename +\"_data.txt\", txt_file, fmt = '%.2f', delimiter= \"\\t\", header=\"wavelength(nm), Intensities at different points\")", "def writeIMPACT(filename,beam,lattice=[]):\n beamStrList=beam2str(beam) \n latticeStrList=lattice2str(lattice)\n \n \n f=open(filename,'w') \n f.writelines(beamStrList)\n f.writelines(latticeStrList)\n f.close()", "def write_file(output_name, parsed_xQTL_list):\n with open(output_name, \"w\") as thefile:\n thefile.write(\"metabolite\\tchr\\tpeak_mb\\tinf_mb\\tsup_mb\\tlod\\n\")\n for xQTL in parsed_xQTL_list:\n xQTL = [str(element) for element in xQTL]\n line = \"\\t\".join(xQTL)\n thefile.write(line + \"\\n\")", "def __write_to_file(output_dir, p_values, nans, fname):\n fname = output_dir + \"/\" + fname\n \n f = open(fname, 'w')\n f.write('name\\tp-val\\tenrinched in\\n')\n p_values.sort()\n \n for tp in p_values:\n pval = (\"%.12f\" % __round_sig(tp[0])).rstrip('0')\n attr_name = str(tp[1])\n enriched_in = str(tp[2])\n f.write(attr_name + \"\\t\" + pval + \"\\t\" + enriched_in + \"\\n\")\n\n for n in nans:\n attr_name = str(n[1])\n f.write(attr_name + \"\\tn/a\\n\")\n\n f.close()", "def write_instrument(self, f):\n trainids = np.arange(self.firsttrain, self.firsttrain + self.ntrains)\n\n ntrains_pad = self.ntrains\n if ntrains_pad % self.chunksize:\n ntrains_pad += + self.chunksize - (ntrains_pad % self.chunksize)\n\n # INDEX\n for part in self.output_parts:\n dev_chan = '%s:xtdf/%s' % (self.device_id, part)\n\n i_first = f.create_dataset('INDEX/%s/first' % dev_chan,\n (self.ntrains,), 'u8', maxshape=(None,))\n i_count = f.create_dataset('INDEX/%s/count' % dev_chan,\n (self.ntrains,), 'u8', maxshape=(None,))\n if part == 'image':\n i_first[:] = np.arange(self.ntrains) * self.frames_per_train\n i_count[:] = self.frames_per_train\n else:\n i_first[:] = np.arange(self.ntrains)\n i_count[:] = 1\n\n\n # INSTRUMENT (image)\n nframes = self.ntrains * self.frames_per_train\n ds = 
f.create_dataset('INSTRUMENT/%s:xtdf/image/trainId' % self.device_id,\n (nframes, 1), 'u8', maxshape=(None, 1))\n ds[:, 0] = np.repeat(trainids, self.frames_per_train)\n\n pid = f.create_dataset('INSTRUMENT/%s:xtdf/image/pulseId' % self.device_id,\n (nframes, 1), 'u8', maxshape=(None, 1))\n pid[:, 0] = np.tile(np.arange(0, self.frames_per_train, dtype='u8'),\n self.ntrains)\n\n for (key, datatype, dims) in self.image_keys:\n f.create_dataset('INSTRUMENT/%s:xtdf/image/%s' % (self.device_id, key),\n (nframes,) + dims, datatype, maxshape=((None,) + dims))\n\n\n # INSTRUMENT (other parts)\n for part in ['detector', 'header', 'trailer']:\n ds = f.create_dataset('INSTRUMENT/%s:xtdf/%s/trainId' % (self.device_id, part),\n (ntrains_pad,), 'u8', maxshape=(None,))\n ds[:self.ntrains] = trainids\n\n for (key, datatype, dims) in self.other_keys:\n f.create_dataset('INSTRUMENT/%s:xtdf/%s' % (self.device_id, key),\n (ntrains_pad,) + dims, datatype, maxshape=((None,) + dims))", "def create_m_file(self):\n global HZ_FREQ, ale_file_name, ale_file_data, tdoa_mode\n if platform.system() == \"Windows\":\n os_sep = \"\\\\\\\\\"\n else:\n os_sep = os.sep\n iq_files = []\n run_dir = os.path.join('TDoA', 'iq') + os_sep + starttime + tdoa_mode + str(FREQUENCY) + os_sep\n os.makedirs(run_dir)\n run_type = \"\"\n for iq_file in glob.glob(os.path.join('TDoA', 'iq') + os.sep + \"*.wav\"):\n copyfile(iq_file, run_dir + iq_file.rsplit(os.sep, 1)[1])\n iq_files.append(os.path.split(iq_file)[1])\n try:\n if tcpclient.get() == 1:\n with open(run_dir + ale_file_name, \"w\") as ale_file_desc:\n ale_file_desc.write(ale_file_data.decode('utf-8'))\n ale_file_desc.close()\n try:\n for ale_file in glob.glob(run_dir + \"ALE*.txt\"):\n a = open(ale_file, 'r')\n aledata = a.read()\n a.close()\n try:\n run_type = \" - [ALE ID: \" + str(\n re.search(r\".+\\[(TWS|TIS)\\]\\[(.*)\\]\\[\", aledata).group(2)) + \"]\"\n except AttributeError:\n run_type = \" - [ALE]\"\n except NameError:\n pass\n except NameError:\n pass\n firstfile = iq_files[0]\n HZ_FREQ = str(firstfile.split(\"_\", 2)[1].split(\"_\", 1)[0])\n proc_m_name = os.path.join('TDoA') + os.sep + \"proc_tdoa_\" + str(firstfile.split(\"_\", 2)[1].split(\"_\", 1)[0])\n with open(proc_m_name + \".m\", \"w\") as m_file:\n m_file.write(\"\"\"## -*- octave -*-\n## This file was auto-generated by \"\"\" + VERSION + \"\"\"\n## \"\"\" + str(b_box2[0]) + \"\"\",\"\"\" + str(b_box2[1]) + \"\"\"\n\\nfunction [tdoa,input]=proc_tdoa_\"\"\" + HZ_FREQ + \"\"\"\n exitcode = 0;\n status = struct;\\n\n try\n status.version = tdoa_get_version();\n\n\"\"\")\n if (ultimate.get()) == 1:\n m_file.write(\" # nodes\\n\")\n else:\n for i in range(len(iq_files)):\n m_file.write(\" input(\" + str(i + 1) + \").fn = fullfile('iq', '\" + str(iq_files[i]) + \"');\\n\")\n m_file.write(\"\"\"\n config = struct('lat_range', [\"\"\" + str(lat_min_map) + \"\"\" \"\"\" + str(lat_max_map) + \"\"\"],\n 'lon_range', [\"\"\" + str(lon_min_map) + \"\"\" \"\"\" + str(lon_max_map) + \"\"\"],\"\"\")\n if selectedlat == \"\" or selectedlon == \"\":\n m_file.write(\"\"\"\n 'known_location', struct('coord', [-90 180],\n 'name', ' '),\"\"\")\n else:\n m_file.write(\"\"\"\n 'known_location', struct('coord', [\"\"\" + str(selectedlat) + \"\"\" \"\"\" + str(selectedlon) + \"\"\"],\n 'name', '\"\"\" + str(selectedcity.rsplit(' (')[0].replace('_', ' ')) + \"\"\"'),\"\"\")\n m_file.write(\"\"\"\n 'dir', 'png',\n 'plot_kiwi', false,\n 'plot_kiwi_json', \"\"\" + str(PKJ) + \"\"\",\n 'use_constraints', \"\"\" + str(UC) + \"\"\",\n 'new', 
\"\"\" + str(TDOAVERSION) + \"\"\"\n );\n\n ## determine map resolution and create config.lat and config.lon fields\n config = tdoa_autoresolution(config);\n\n [input,status.input] = tdoa_read_data(config, input, 'gnss_pos');\n\n config.plotname = sprintf('TDoA_\"\"\" + HZ_FREQ + \"\"\"');\n config.title = sprintf('\"\"\" + FREQUENCY + \"\"\" \"\"\" + mode + \"\"\" [BW=\"\"\" + str(abs(lpcut - hpcut))\n + \"\"\"] - REC on \"\"\" + str(datetime.utcnow().strftime('%d %b %Y %H%M.%Sz'))\n + run_type + \"\"\"');\n \n if config.new\n [tdoa, status.cross_correlations] = tdoa_compute_lags_new(input);\n else\n [tdoa, status.cross_correlations] = tdoa_compute_lags(input, ...\n struct('dt', 12000, # 1-second cross-correlation intervals\n 'range', 0.005, # peak search range is +-5 ms\n 'dk', [-2:2], # use 5 points for peak fitting\n 'fn', @tdoa_peak_fn_pol2fit,# fit a pol2 to the peak\n 'remove_outliers', ~config.use_constraints\n ));\n end\n\n if config.use_constraints\n [tdoa,status.cross_correlations] = tdoa_cluster_lags(config, tdoa, input, status.cross_correlations);\n [tdoa,input,status.constraints] = tdoa_verify_lags (config, tdoa, input);\n end\n [tdoa,status.position] = tdoa_plot_map(input, tdoa, config);\n # if config.new\n # tdoa = tdoa_plot_dt_new(input, tdoa, config, 1e-2);\n # else\n # tdoa = tdoa_plot_dt (input, tdoa, config, 2.5e-3);\n # end\n catch err\n json_save_cc(stderr, err);\n status.octave_error = err;\n exitcode = 1;\n end_try_catch\n\n try\n fid = fopen(fullfile('png', 'status.json'), 'w');\n json_save_cc(fid, status);\n fclose(fid);\n catch err\n json_save_cc(stderr, err);\n exitcode += 2;\n end_try_catch\n\n if exitcode != 0\n exit(exitcode);\n end\n\"\"\")\n m_file.write(\"\"\"\n# Set T time for filenames\n[tdoatime] = strftime (\\\"%d%b%Y_%H%M%Sz\\\", gmtime (time ()));\n\\n# Get coordinates from the current TDoA computing\nglobal mlp;\n[lat,lon] = deal(strsplit(mlp, \\\" \\\"){4}, strsplit(strsplit(mlp, \\\" \\\"){6}, \\\"]\\\"){1});\n\\n# Get Mapbox with the most_likely coordinates of the current TDoA computing\"\"\")\n\n # Adapt the getmap.py arguments if a known place has been set or not\n if platform.system() == \"Windows\":\n python_path = '..\\\\\\python\\\\\\pythonw.exe'\n else:\n python_path = 'python'\n if selectedlat == \"\" or selectedlon == \"\":\n m_file.write(\"\"\"\n[curlcmd] = [\\\"\"\"\" + python_path + \"\"\" \", \"..\"\"\" + os_sep + \"\"\"getmap.py \", lat, \" \", lon, \" -90\", \" 180\", \" \"\"\"\n + MAP_BOX + \"\"\" \", \\\"\"\"\" + run_dir[5:] + \"\"\"TDoA_Map.png\", \" proc_tdoa_\"\"\"\n + HZ_FREQ + \"\"\".m\"];\"\"\")\n else:\n m_file.write(\"\"\"\n[curlcmd] = [\\\"\"\"\" + python_path + \"\"\" \", \"..\"\"\" + os_sep + \"\"\"getmap.py \", lat, \" \", lon, \" \"\"\"\n + str(selectedlat) + \"\"\"\", \" \"\"\" + str(selectedlon) + \"\"\"\", \" \"\"\"\n + MAP_BOX + \"\"\" \", \\\"\"\"\" + run_dir[5:] + \"\"\"TDoA_Map.png\", \" proc_tdoa_\"\"\"\n + HZ_FREQ + \"\"\".m\"];\"\"\")\n\n m_file.write(\"\"\"\nsystem(curlcmd);\n\\n# Merge TDoA result (pdf) + Mapbox (pdf) + plot_iq (pdf) into a single .pdf file\n[gscmd] = [\"gs -q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=\"\"\" + run_dir[5:] + \"\"\"TDoA_\"\"\"\n + str(HZ_FREQ) + \"\"\"_\", tdoatime, \".pdf pdf\"\"\" + os_sep + \"\"\"TDoA_\"\"\" + str(HZ_FREQ) + \"\"\".pdf \"\"\"\n + run_dir[5:] + \"\"\"TDoA_Map.pdf \"\"\" + run_dir[5:] + \"\"\"TDoA_\"\"\"\n + str(HZ_FREQ) + \"\"\"_spectrogram.pdf -c \\\\\\\"[ /Title (TDoA_\"\"\"\n + str(HZ_FREQ) + \"\"\"_\", tdoatime, \".pdf) /DOCINFO 
pdfmark\\\\\\\" -f\\\"];\nsystem(gscmd);\n\\n# Delete some files\ndelete(\\\"\"\"\" + run_dir[5:] + \"\"\"TDoA_Map.pdf\")\ndisp(\"finished\");\nendfunction\n\"\"\")\n m_file.close()\n time.sleep(0.2)\n if (ultimate.get()) == 1:\n copyfile(proc_m_name + \".m\", run_dir + \"proc_tdoa_\" + HZ_FREQ + \".empty\")\n if platform.system() == \"Windows\":\n copyfile(\"compute_ultimate.bat\", run_dir + \"compute_ultimate.bat\")\n copyfile(\"compute_ultimate.py\", run_dir + \"compute_ultimate.py\")\n copyfile('plot_iq.py', run_dir + \"plot_iq.py\")\n copyfile('trim_iq.py', run_dir + \"trim_iq.py\")\n copyfile('nognss.py', run_dir + \"nognss.py\")\n os.chmod(run_dir + \"compute_ultimate.py\", 0o777)\n os.chmod(run_dir + \"plot_iq.py\", 0o777)\n os.chmod(run_dir + \"trim_iq.py\", 0o777)\n os.chmod(run_dir + \"nognss.py\", 0o777)\n # PlotIQ().start()\n else:\n copyfile(proc_m_name + \".m\", run_dir + \"proc_tdoa_\" + HZ_FREQ + \".m\")\n if platform.system() == \"Windows\":\n with open(run_dir + \"recompute.bat\", \"w\") as recompute:\n recompute.write(\"\"\":: \"\"\" + VERSION + \"\"\" - Windows recompute TDoA batch file\n:: This script moves *.wav back to iq directory and proc_tdoa_\"\"\" + HZ_FREQ + \"\"\".m to\n:: TDoA directory then opens a file editor so you can modify .m file parameters.\n@echo off\nset PATH=%CD%\\..\\..\\..\\octave\\\\bin;%CD%\\..\\..\\..\\python;%PATH%\nif not exist *spectrogram.pdf pythonw.exe plot_iq.py\ncopy *.wav ..\\\\\ncopy proc_tdoa_\"\"\" + HZ_FREQ + \"\"\".m ..\\..\\\\\ncd ..\\..\nstart /W notepad \"proc_tdoa_\"\"\" + HZ_FREQ + \"\"\".m\"\noctave-cli.exe proc_tdoa_\"\"\" + HZ_FREQ + \"\"\".m\ndel proc_tdoa_\"\"\" + HZ_FREQ + \"\"\".m\"\"\")\n recompute.close()\n copyfile('plot_iq.py', run_dir + \"plot_iq.py\")\n copyfile('trim_iq.py', run_dir + \"trim_iq.py\")\n copyfile('nognss.py', run_dir + \"nognss.py\")\n else:\n with open(run_dir + \"recompute.sh\", \"w\") as recompute:\n recompute.write(\"\"\"#!/bin/bash\n## This script moves *.wav back to iq directory and proc_tdoa_\"\"\" + HZ_FREQ + \"\"\".m to\n## TDoA directory then opens a file editor so you can modify .m file parameters.\n[ ! 
-f ./TDoA_\"\"\" + HZ_FREQ + \"\"\"_spectrogram.pdf ] && python plot_iq.py\ncp ./*.wav ../\ncp proc_tdoa_\"\"\" + HZ_FREQ + \"\"\".m ../../\ncd ../..\n$EDITOR proc_tdoa_\"\"\" + HZ_FREQ + \"\"\".m\noctave-cli proc_tdoa_\"\"\" + HZ_FREQ + \"\"\".m\nrm -f proc_tdoa_\"\"\" + HZ_FREQ + \"\"\".m\"\"\")\n # octave-cli \"\"\" + (\"--eval \" if sys.version_info[0] == 3 else \"\") + \"\"\"proc_tdoa_\"\"\" + HZ_FREQ\n # + (\".m\" if sys.version_info[0] == 2 else \"\") + \"\"\"\n recompute.close()\n os.chmod(run_dir + \"recompute.sh\", 0o777)\n copyfile('plot_iq.py', run_dir + \"plot_iq.py\")\n copyfile('trim_iq.py', run_dir + \"trim_iq.py\")\n copyfile('nognss.py', run_dir + \"nognss.py\")\n os.chmod(run_dir + \"plot_iq.py\", 0o777)\n os.chmod(run_dir + \"trim_iq.py\", 0o777)\n os.chmod(run_dir + \"nognss.py\", 0o777)", "def write_specs(output_filename, K, Q, V, A):\n log_and_print(\"Writing exam specs to file: %s\" % output_filename)\n (N_e, N_Q) = Q.shape\n with open(output_filename, 'w') as out_f:\n writer = csv.writer(out_f)\n row = [\"e\", \"K(e)\"]\n for qi in range(N_Q):\n row.extend([\"Q(e,q=%d)\" % (qi + 1), \"V(e,q=%d)\" % (qi + 1), \"A(e,q=%d,:)\" % (qi + 1)])\n writer.writerow(row)\n for ei in range(N_e):\n row = [ei + 1, K[ei]]\n for qi in range(N_Q):\n row.extend([Q[ei,qi] + 1, V[ei,qi] + 1, \"\".join(A[ei,qi,:])])\n writer.writerow(row)\n log(\"Successfully completed writing exam specs to file\")", "def write_mux(self, lat_long_points, time_step_count, time_step,\n depth=None, ha=None, ua=None, va=None):\n\n #print \"lat_long_points\", lat_long_points\n #print \"time_step_count\",time_step_count\n #print \"time_step\",\n\n \n points_num = len(lat_long_points)\n lonlatdeps = []\n quantities = ['HA','UA','VA']\n \n mux_names = [WAVEHEIGHT_MUX_LABEL,\n EAST_VELOCITY_LABEL,\n NORTH_VELOCITY_LABEL]\n quantities_init = [[],[],[]]\n # urs binary is latitude fastest\n for point in lat_long_points:\n lat = point[0]\n lon = point[1]\n _ , e, n = redfearn(lat, lon)\n if depth is None:\n this_depth = n\n else:\n this_depth = depth\n if ha is None:\n this_ha = e\n else:\n this_ha = ha\n if ua is None:\n this_ua = n\n else:\n this_ua = ua\n if va is None:\n this_va = e \n else:\n this_va = va \n lonlatdeps.append([lon, lat, this_depth])\n quantities_init[0].append(this_ha) # HA\n quantities_init[1].append(this_ua) # UA\n quantities_init[2].append(this_va) # VA \n \n file_handle, base_name = tempfile.mkstemp(\"\")\n os.close(file_handle)\n os.remove(base_name)\n\n files = [] \n for i, q in enumerate(quantities): \n quantities_init[i] = ensure_numeric(quantities_init[i])\n #print \"HA_init\", HA_init\n q_time = num.zeros((time_step_count, points_num), num.float64)\n for time in range(time_step_count):\n q_time[time,:] = quantities_init[i] #* time * 4\n \n #Write C files\n columns = 3 # long, lat , depth\n file = base_name + mux_names[i]\n #print \"base_name file\",file \n f = open(file, 'wb')\n files.append(file)\n f.write(pack('i',points_num))\n f.write(pack('i',time_step_count))\n f.write(pack('f',time_step))\n\n #write lat/long info\n for lonlatdep in lonlatdeps:\n for float in lonlatdep:\n f.write(pack('f',float))\n \n # Write quantity info\n for time in range(time_step_count):\n for point_i in range(points_num):\n f.write(pack('f',q_time[time,point_i]))\n #print \" mux_names[i]\", mux_names[i] \n #print \"f.write(pack('f',q_time[time,i]))\", q_time[time,point_i]\n f.close()\n return base_name, files", "def encode_file(matrix, fp):\n fp.write(IdxEncoder().write(matrix))", "def 
write_tinker_xyz(self, fname, elems=None, atypes=None, xyz=None, \n cnct = None, cell=None,fullcell=False, mode=\"w\"):\n if elems == None: elems = self.elements\n if atypes== None: atypes= self.atypes\n if xyz == None: xyz = self.xyz\n if cnct == None: cnct= self.cnct\n #if cell == None: cell= self.cell\n if numpy.equal(cell, None): cell = self.cell\n natoms = numpy.shape(xyz)[0]\n if type(cell) != type(None):\n cellparams = unit_cell.abc_from_vectors(cell)\n if type(cell) != type(None):\n if fullcell:\n pass\n else:\n rotcell = unit_cell.vectors_from_abc(cellparams)\n # compute fractional coords in old cell\n inv_cell = numpy.linalg.inv(cell)\n abc = numpy.dot(xyz, inv_cell)\n # compute cartesian coordinates in new cellvectors from fractional coords\n xyz = numpy.dot(abc,rotcell)\n if self.is_master:\n f = open(fname, mode)\n if type(cell) != type(None):\n if fullcell:\n f.write((\"%5d # \"+9*\"%10.4f \"+\"\\n\") % tuple([natoms]+cell.ravel().tolist())) \n else:\n f.write(\"%5d %10.4f %10.4f %10.4f %10.4f %10.4f %10.4f\\n\" % tuple([natoms]+cellparams))\n else:\n f.write(\"%5d \\n\" % natoms)\n for i in xrange(natoms):\n line = (\"%3d %-3s\" + 3*\"%12.6f\" + \" %5s\") % \\\n tuple([i+1]+[elems[i]]+ xyz[i].tolist() + [atypes[i]])\n conn = (numpy.array(cnct[i])+1).tolist()\n if len(conn) != 0:\n line += (len(conn)*\"%7d\") % tuple(conn)\n f.write(\"%s \\n\" % line)\n f.close()\n return", "def save_sample_orientations_to_CSV_file(self, filename):\n #List of positions\n pos_list = self.get_positions_used(None, also_try_position=False)\n g = self.inst.goniometer\n #Start CSV file\n f = open(filename, \"w\")\n g.csv_make_header(f, self.crystal.name, self.crystal.description)\n #Go to some angle\n for (i, pos) in enumerate(pos_list):\n if not pos is None:\n g.csv_add_position(f, pos.angles, pos.criterion, pos.criterion_value, pos.comment)\n #Save the file\n f.close()", "def write_frames_ascii(filename,trace,size,sx,sy,A,n,b):\n \n file = str(filename)\n f=open(file,'w') #opens in only writing mode first, to overwrite the file if it already exists\n f.close()\n f=open(file, 'a')\n \n t = trace[:,0] # the trace array contains the 2D coordinates and the time points\n for i in t:\n xc = trace[i,1]\n yc = trace[i,2]\n frame = gaussian_frame(xc,yc,size,sx,sy,A,n,b) #appends every new frame to the file\n np.savetxt(f,frame)\n f.close() \n \n #writes a metafile about the frames (as a dictionary), containing the size and the length:\n trace_length = len(trace)\n Info = {\"trace length\": trace_length, \"size frames\": size} \n writer = csv.writer(open(filename + 'Info', 'wb'))\n for key, value in Info.items():\n writer.writerow([key, value])", "def output2file(list_all_coord, list4bed):\n\n #db = open(\"LRG_coord.txt\",\"w\")\n #db_csv = open(\"LRG_coord.csv\",\"w\")\n #bed = open (\"LRG_bed\", \"w\")\n \n db = open(\"./Outputs/LRG_coord.txt\",\"w\")\n db_csv = open(\"./Outputs/LRG_coord.csv\",\"w\")\n bed = open (\"./Outputs/LRG_bed\", \"w\")\n \n \n headings = [\"transcript\",\"exon\", \"ex_start\", \"ex_end\", \"tr_start\", \"tr_end\", \"pt_start\", \"pt_end\"]\n bed_headings = [\"chr\", \"start\", \"end\", \"strand\", \"transcript\" ]\n \n #Writting tab separated text file\n db.write(\"\\t\".join(headings) + \"\\n\") # writting headings\n for group in list_all_coord: \n db.write (\"\\t\".join(group) + \"\\n\") # writting coordinates\n \n #Writting csv file\n db_csv.write(\",\".join(headings) + \"\\n\") # writting headings\n for group in list_all_coord: \n db_csv.write (\",\".join(group) + \"\\n\") # 
writting coordinates\n\n bed.write(\"\\t\".join(bed_headings) + \"\\n\") # writting headings\n for group in list4bed: \n bed.write (\"\\t\".join(group) + \"\\n\")\n\n db.close()\n db_csv.close()\n bed.close()\n \n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Outputs the material block corresponding to this microstructure for a finite element calculation with zset.
def print_zset_material_block(self, mat_file, grain_prefix='_ELSET'):
    f = open('elset_list.txt', 'w')
    for g in self.grains:
        o = g.orientation
        f.write(
            ' **elset %s%d *file %s *integration theta_method_a 1.0 1.e-9 150 *rotation %7.3f %7.3f %7.3f\n' % (
                grain_prefix, g.id, mat_file, o.phi1(), o.Phi(), o.phi2()))
    f.close()
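A minimal usage sketch (not part of the original source; the microstructure instance and the material file name are assumptions inferred from the method body):

# Hypothetical example: `micro` is assumed to be a Microstructure already
# populated with Grain objects carrying orientations.
micro.print_zset_material_block('material.inc', grain_prefix='_ELSET')
# Writes one '**elset' line per grain to 'elset_list.txt', each line
# referencing the material file and the grain's Euler angles (phi1, Phi, phi2).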
[ "def pwr_assembly():\n\n model = openmc.model.Model()\n\n # Define materials.\n fuel = openmc.Material(name='Fuel')\n fuel.set_density('g/cm3', 10.29769)\n fuel.add_nuclide('U234', 4.4843e-6)\n fuel.add_nuclide('U235', 5.5815e-4)\n fuel.add_nuclide('U238', 2.2408e-2)\n fuel.add_nuclide('O16', 4.5829e-2)\n\n clad = openmc.Material(name='Cladding')\n clad.set_density('g/cm3', 6.55)\n clad.add_nuclide('Zr90', 2.1827e-2)\n clad.add_nuclide('Zr91', 4.7600e-3)\n clad.add_nuclide('Zr92', 7.2758e-3)\n clad.add_nuclide('Zr94', 7.3734e-3)\n clad.add_nuclide('Zr96', 1.1879e-3)\n\n hot_water = openmc.Material(name='Hot borated water')\n hot_water.set_density('g/cm3', 0.740582)\n hot_water.add_nuclide('H1', 4.9457e-2)\n hot_water.add_nuclide('O16', 2.4672e-2)\n hot_water.add_nuclide('B10', 8.0042e-6)\n hot_water.add_nuclide('B11', 3.2218e-5)\n hot_water.add_s_alpha_beta('c_H_in_H2O')\n\n # Define the materials file.\n model.materials = (fuel, clad, hot_water)\n\n # Instantiate ZCylinder surfaces\n fuel_or = openmc.ZCylinder(x0=0, y0=0, r=0.39218, name='Fuel OR')\n clad_or = openmc.ZCylinder(x0=0, y0=0, r=0.45720, name='Clad OR')\n\n # Create boundary planes to surround the geometry\n pitch = 21.42\n min_x = openmc.XPlane(x0=-pitch/2, boundary_type='reflective')\n max_x = openmc.XPlane(x0=+pitch/2, boundary_type='reflective')\n min_y = openmc.YPlane(y0=-pitch/2, boundary_type='reflective')\n max_y = openmc.YPlane(y0=+pitch/2, boundary_type='reflective')\n\n # Create a fuel pin universe\n fuel_pin_universe = openmc.Universe(name='Fuel Pin')\n fuel_cell = openmc.Cell(name='fuel', fill=fuel, region=-fuel_or)\n clad_cell = openmc.Cell(name='clad', fill=clad, region=+fuel_or & -clad_or)\n hot_water_cell = openmc.Cell(name='hot water', fill=hot_water, region=+clad_or)\n fuel_pin_universe.add_cells([fuel_cell, clad_cell, hot_water_cell])\n\n\n # Create a control rod guide tube universe\n guide_tube_universe = openmc.Universe(name='Guide Tube')\n gt_inner_cell = openmc.Cell(name='guide tube inner water', fill=hot_water,\n region=-fuel_or)\n gt_clad_cell = openmc.Cell(name='guide tube clad', fill=clad,\n region=+fuel_or & -clad_or)\n gt_outer_cell = openmc.Cell(name='guide tube outer water', fill=hot_water,\n region=+clad_or)\n guide_tube_universe.add_cells([gt_inner_cell, gt_clad_cell, gt_outer_cell])\n\n # Create fuel assembly Lattice\n assembly = openmc.RectLattice(name='Fuel Assembly')\n assembly.pitch = (pitch/17, pitch/17)\n assembly.lower_left = (-pitch/2, -pitch/2)\n\n # Create array indices for guide tube locations in lattice\n template_x = np.array([5, 8, 11, 3, 13, 2, 5, 8, 11, 14, 2, 5, 8,\n 11, 14, 2, 5, 8, 11, 14, 3, 13, 5, 8, 11])\n template_y = np.array([2, 2, 2, 3, 3, 5, 5, 5, 5, 5, 8, 8, 8, 8,\n 8, 11, 11, 11, 11, 11, 13, 13, 14, 14, 14])\n\n # Create 17x17 array of universes\n assembly.universes = np.tile(fuel_pin_universe, (17, 17))\n assembly.universes[template_x, template_y] = guide_tube_universe\n\n # Create root Cell\n root_cell = openmc.Cell(name='root cell', fill=assembly)\n root_cell.region = +min_x & -max_x & +min_y & -max_y\n\n # Create root Universe\n model.geometry.root_universe = openmc.Universe(name='root universe')\n model.geometry.root_universe.add_cell(root_cell)\n\n model.settings.batches = 10\n model.settings.inactive = 5\n model.settings.particles = 100\n model.settings.source = openmc.Source(space=openmc.stats.Box(\n [-pitch/2, -pitch/2, -1], [pitch/2, pitch/2, 1], only_fissionable=True))\n\n plot = openmc.Plot()\n plot.origin = (0.0, 0.0, 0)\n plot.width = (21.42, 
21.42)\n plot.pixels = (300, 300)\n plot.color_by = 'material'\n model.plots.append(plot)\n\n return model", "def export_material_property(self, name='', flags=0x0001,\n ambient=(1.0, 1.0, 1.0), diffuse=(1.0, 1.0, 1.0),\n specular=(0.0, 0.0, 0.0), emissive=(0.0, 0.0, 0.0),\n gloss=10.0, alpha=1.0, emitmulti=1.0):\n\n # create block (but don't register it yet in self.blocks)\n matprop = NifFormat.NiMaterialProperty()\n\n # list which determines whether the material name is relevant or not\n # only for particular names this holds, such as EnvMap2\n # by default, the material name does not affect rendering\n specialnames = (\"EnvMap2\", \"EnvMap\", \"skin\", \"Hair\",\n \"dynalpha\", \"HideSecret\", \"Lava\")\n\n # hack to preserve EnvMap2, skinm, ... named blocks (even if they got\n # renamed to EnvMap2.xxx or skin.xxx on import)\n if self.properties.game in ('OBLIVION', 'FALLOUT_3'):\n for specialname in specialnames:\n if (name.lower() == specialname.lower()\n or name.lower().startswith(specialname.lower() + \".\")):\n if name != specialname:\n self.warning(\"Renaming material '%s' to '%s'\"\n % (name, specialname))\n name = specialname\n\n # clear noname materials\n if name.lower().startswith(\"noname\"):\n self.warning(\"Renaming material '%s' to ''\" % name)\n name = \"\"\n\n matprop.name = name\n matprop.flags = flags\n matprop.ambient_color.r = ambient[0]\n matprop.ambient_color.g = ambient[1]\n matprop.ambient_color.b = ambient[2]\n matprop.diffuse_color.r = diffuse[0]\n matprop.diffuse_color.g = diffuse[1]\n matprop.diffuse_color.b = diffuse[2]\n matprop.specular_color.r = specular[0]\n matprop.specular_color.g = specular[1]\n matprop.specular_color.b = specular[2]\n matprop.emissive_color.r = emissive[0]\n matprop.emissive_color.g = emissive[1]\n matprop.emissive_color.b = emissive[2]\n matprop.glossiness = gloss\n matprop.alpha = alpha\n matprop.emit_multi = emitmulti\n\n # search for duplicate\n # (ignore the name string as sometimes import needs to create different\n # materials even when NiMaterialProperty is the same)\n for block in self.blocks:\n if not isinstance(block, NifFormat.NiMaterialProperty):\n continue\n\n # when optimization is enabled, ignore material name\n if self.EXPORT_OPTIMIZE_MATERIALS:\n ignore_strings = not(block.name in specialnames)\n else:\n ignore_strings = False\n\n # check hash\n first_index = 1 if ignore_strings else 0\n if (block.get_hash()[first_index:] ==\n matprop.get_hash()[first_index:]):\n self.warning(\n \"Merging materials '%s' and '%s'\"\n \" (they are identical in nif)\"\n % (matprop.name, block.name))\n return block\n\n # no material property with given settings found, so use and register\n # the new one\n return self.register_block(matprop)", "def CA_MaterialField(self):\r\n comm = ''\r\n comm += '# CA_MaterialField\\n'\r\n comm += 'MtrlFld=AFFE_MATERIAU(MAILLAGE=MESH,\\n'\r\n comm += ' AFFE=(\\n'\r\n\r\n mat2Props = self.getPropertiesByMid()\r\n for mid, material in sorted(self.materials.iteritems()):\r\n comm += ' _F(GROUP_MA=('\r\n pids = mat2Props[mid]\r\n #comm += \" \"\r\n for pid in pids:\r\n comm += \"'P%s',\" % (pid)\r\n comm = comm[:-1] + '),\\n'\r\n comm += \" MATER=M%s),\\n\" % (mid)\r\n\r\n comm = comm[:-1] + '));\\n'\r\n comm += self.breaker()\r\n return comm", "def _build_formulation(self):\n add_formulation_to_block(\n block=self.block,\n model_definition=self.network_definition,\n input_vars=self.block.inputs_list,\n output_vars=self.block.outputs_list,\n )", "def getDisplayBlock(self):\n blockData = 
self.getHandle().getDisplayBlock()\n return CraftMagicNumbers.getMaterial(blockData.getBlock()).getNewData(int(blockData.getBlock().toLegacyData(blockData)))", "def GetElementMaterial(self):\n\t\treturn self._ElementMaterial", "def add_static_material(self):\n self.materials.update_data(\n 'M1',\n {'Young_modulus': np.array([1.]), 'Poisson_ratio': np.array([.3])})\n self.sections.update_data(\n 'M1', {'TYPE': 'SOLID', 'EGRP': 'ALL'})\n return", "def setDisplayBlock(self, material):\n if material != None:\n block = CraftMagicNumbers.getBlock(material.getItemTypeId()).fromLegacyData(material.getData())\n self.getHandle().setDisplayBlock(block)\n else:\n self.getHandle().setDisplayBlock(Blocks.AIR.getBlockData())\n self.getHandle().a(False)", "def material(self):\n return self.edb_padstack.GetData().GetMaterial()", "def create_material_file(self):\n\t # create and open the material.dat file\n\t with open(self.userPath + '/material.dat', 'w') as material_file:\n\t # for each material\n\t for material in self.materials:\n\t # write the type of the material\n\t line = 'material ' + material['MaterialType'] + ' [ \\n'\n\t material_file.write(line)\n\t #write the name of the material\n\t line = 'name='+material['name'] +'\\n'\n\t material_file.write(line)\n\t # for each parameter we write it in the material file\n\t # except if this is a range a value\n\t for key, value in material.items():\n\t \tprint(key)\n\t if key != 'MaterialType' and key != 'name':\n\t if type(value) != dict:\n\t line = key + '=' + str(value) + '\\n'\n\t material_file.write(line)\n\t else:\n\t # define a key so that we can create the job for this\n\t # parameter in this specific material\n\t new_key = 'Material_'+material['name'] + '_' + key\n\t # define the range from the infos in the json file\n\t range_values = self.define_range(value)\n\t # append this new variable in the parametric space\n\t self.parametric_space[new_key] = range_values\n\t # and we define a standard value for this parameter in the file\n\t # we will take the first value of the range\n\t default_value = range_values[0]\n\t line = key + '=' + str(default_value) + '\\n'\n\t material_file.write(line)\n\t material_file.write(']')", "def output_zmatrix(self, output_file):\n with open(output_file, 'w') as f:\n f.write(self.str_zmatrix())", "def calc_zm(self,material,z,m,comp, comp_err):\r\n z_total = 0.\r\n m_total = 0.\r\n p1 = []\r\n p2 = []\r\n for x in range (0,len(z)):\r\n z_total+= z[x]*comp[x]\r\n m_total+= m[x]*comp[x]\r\n p1.append((z[x] * comp_err[x]/comp[x])**2)\r\n p2.append((m[x] * comp_err[x]/comp[x])**2)\r\n Z_err_1 = 0.\r\n M_err_1 = 0.\r\n for x in range (0,len(z)):\r\n Z_err_1+= p1[x]\r\n M_err_1+= p2[x]\r\n Z_err = np.sqrt(Z_err_1)\r\n M_err = np.sqrt(M_err_1)\r\n zm= z_total/ m_total\r\n zm_err = zm * np.sqrt((Z_err/z_total)**2 + (M_err/m_total)**2 )\r\n print(material+':')\r\n print('z:'+ str(z_total)+' +/- '+ str(Z_err))\r\n print('m:'+ str(m_total)+' +/- ' + str(M_err))\r\n print('z/m:'+ str( zm)+' +/- '+ str(zm_err))", "def get_sample():\n # defining materials\n m_ambience = ba.HomogeneousMaterial(\"Air\", 0.0, 0.0)\n m_substrate = ba.HomogeneousMaterial(\"Substrate\", 6e-6, 2e-8)\n m_particle = ba.HomogeneousMaterial(\"Particle\", 6e-4, 2e-8)\n\n # mesocrystal lattice\n lattice_basis_1 = ba.kvector_t(5.0, 0.0, 0.0)\n lattice_basis_2 = ba.kvector_t(0.0, 5.0, 0.0)\n lattice_basis_3 = ba.kvector_t(0.0, 0.0, 5.0)\n lattice = ba.Lattice(lattice_basis_1, lattice_basis_2, lattice_basis_3)\n\n # spherical particle that forms the base of the 
mesocrystal\n sphere_ff = ba.FormFactorFullSphere(2*nm)\n sphere = ba.Particle(m_particle, sphere_ff)\n\n # crystal structure\n crystal = ba.Crystal(sphere, lattice)\n\n # mesocrystal\n meso_ff = ba.FormFactorCylinder(20 * nm, 50 * nm)\n meso = ba.MesoCrystal(crystal, meso_ff)\n\n particle_layout = ba.ParticleLayout()\n particle_layout.addParticle(meso)\n\n air_layer = ba.Layer(m_ambience)\n air_layer.addLayout(particle_layout)\n substrate_layer = ba.Layer(m_substrate)\n\n multi_layer = ba.MultiLayer()\n multi_layer.addLayer(air_layer)\n multi_layer.addLayer(substrate_layer)\n return multi_layer", "def __repr__(self) -> str:\r\n string = f\"Material = {self.mat}\\n\\n\"\r\n string += f\"temperature = {self.temperature}\\npressure = {self.pressure}\\n\"\r\n return string", "def z(self) -> None:\n self.circ.z(self.qregisters[\"data\"][0])", "def test_set_material(self):\n self.widget.w_mat.c_mat_type.setCurrentIndex(0)\n assert self.test_obj.rotor.slot.magnet[0].mat_type.name == \"test1\"\n assert self.test_obj.rotor.slot.magnet[0].mat_type.elec.rho == 0.31", "def to_basic_block(self):\n return _raw_util.raw_divide_ff_sptr_to_basic_block(self)", "def create_FEM(self):\n ops.uniaxialMaterial('Elastic', self.id, self.E_0)", "def readMaterials(self, filename):\n \n currentMat = None\n commentre = re.compile(\"^#\")\n newmatre = re.compile(\"^newmtl ([^\\s]+)\")\n mapre = re.compile(\"^map_Kd ([^\\s]+)\")\n illumre = re.compile(\"^illum ([0-9]+)\")\n floatrx = '(-?([0-9]+(\\.[0-9]+)?e(\\+|-)[0-9]+)|-?([0-9]+(\\.[0-9]+)?))'\n kre = re.compile(\"^(K[dase])\\s+\" + floatrx + \"\\s+\" + floatrx + \"\\s+\" + floatrx)\n nre = re.compile(\"^(N[s])\\s+\" + floatrx)\n for line in open(filename).readlines():\n if commentre.match(line):\n continue\n newmatmatch = newmatre.match(line)\n if newmatmatch:\n if currentMat != None:\n self.materials.append(currentMat)\n currentMat = Material(newmatmatch.groups()[0])\n continue\n mapmatch = mapre.match(line)\n if mapmatch:\n currentMat.map_Kd = os.path.join(os.path.dirname(filename), mapmatch.groups()[0])\n continue\n illummatch = illumre.match(line)\n if illummatch:\n currentMat.illum = int(illummatch.groups()[0])\n continue\n kmatch = kre.match(line)\n if kmatch:\n t = kmatch.groups()[0]\n val = (kmatch.groups()[1], kmatch.groups()[7], kmatch.groups()[11])\n if t == \"Kd\":\n currentMat.Kd = val\n elif t == \"Ka\":\n currentMat.Ka = val\n elif t == \"Ks\":\n currentMat.Ks = val\n elif t == \"Ke\":\n currentMat.Ke = val\n nmatch = nre.match(line)\n if nmatch:\n t = nmatch.groups()[0]\n if t == \"Ns\":\n currentMat.Ns = nmatch.groups()[1]\n \n if currentMat != None:\n self.materials.append(currentMat)", "def create_FEM(self):\n ops.uniaxialMaterial('Steel4', self.id, self.f_y, self.E_0,\n '-asym',\n '-kin',\n self.b_k, self.R_0, self.r_1, self.r_2,\n self.b_kc, self.R_0c, self.r_1c, self.r_2c,\n '-iso',\n self.b_i, self.rho_i, self.b_l, self.R_i,\n self.l_yp,\n self.b_ic, self.rho_ic, self.b_lc, self.R_ic,\n '-ult',\n self.f_u, self.R_u,\n self.f_uc, self.R_uc)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the microstructure as an HDF5 file.
def to_h5(self):
    import time
    from pymicro import __version__ as pymicro_version
    print('opening file %s.h5 for writing' % self.name)
    f = h5py.File('%s.h5' % self.name, 'w')
    f.attrs['Pymicro_Version'] = np.string_(pymicro_version)
    f.attrs['HDF5_Version'] = h5py.version.hdf5_version
    f.attrs['h5py_version'] = h5py.version.version
    f.attrs['file_time'] = time.time()
    f.attrs['microstructure_name'] = self.name
    if hasattr(self, 'data_dir'):
        f.attrs['data_dir'] = self.data_dir
    # ensemble data
    ed = f.create_group('EnsembleData')
    cs = ed.create_group('CrystalStructure')
    sym = self.get_lattice().get_symmetry()
    cs.attrs['symmetry'] = sym.to_string()
    lp = cs.create_dataset('LatticeParameters',
                           data=np.array(self.get_lattice().get_lattice_parameters(), dtype=np.float32))
    # feature data
    fd = f.create_group('FeatureData')
    grain_ids = fd.create_dataset('grain_ids',
                                  data=np.array([g.id for g in self.grains], dtype=np.int))
    avg_rods = fd.create_dataset('R_vectors',
                                 data=np.array([g.orientation.rod for g in self.grains], dtype=np.float32))
    centers = fd.create_dataset('centers',
                                data=np.array([g.center for g in self.grains], dtype=np.float32))
    # cell data
    cd = f.create_group('CellData')
    if hasattr(self, 'grain_map') and self.grain_map is not None:
        gm = cd.create_dataset('grain_ids', data=self.grain_map,
                               compression='gzip', compression_opts=9)
        gm.attrs['voxel_size'] = self.voxel_size
    if hasattr(self, 'mask') and self.mask is not None:
        ma = cd.create_dataset('mask', data=self.mask,
                               compression='gzip', compression_opts=9)
        ma.attrs['voxel_size'] = self.voxel_size
    print('done writing')
    f.close()
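A minimal usage sketch, assuming a populated Microstructure instance (the sample name and the optional grain_map/mask attributes are illustrative, not from the source):

# Hypothetical example of writing the HDF5 file.
micro = Microstructure(name='steel_sample')   # constructor with a name, as used elsewhere in the class
# ... append Grain objects to micro.grains, optionally set micro.grain_map / micro.mask ...
micro.to_h5()   # produces 'steel_sample.h5' with EnsembleData, FeatureData and CellData groups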
[ "def write_hdf5_mesh(self):\n self._mesh.write_hdf5()", "def writeHD5():\n global Data1\n\n store = HDFStore('.\\store.h5')\n store['listCrisis'] = Data1\n store.close()", "def _save_hdf5(self, output_file_path=None):\n #save sync data\n if output_file_path:\n filename = output_file_path\n else:\n filename = self.output_path+\".h5\"\n data = np.fromfile(self.output_path, dtype=np.uint32)\n total_samples = len(data)\n\n events = self._get_events(data)\n\n h5_output = h5.File(filename, 'w')\n h5_output.create_dataset(\"data\", data=events)\n #save meta data\n meta_data = self._get_meta_data()\n meta_data['total_samples'] = total_samples\n\n meta_data_np = np.string_(str(meta_data))\n h5_output.create_dataset(\"meta\", data=meta_data_np)\n h5_output.close()\n\n #remove raw file\n if not self.save_raw:\n os.remove(self.output_path)\n\n if self.verbose:\n logging.info(\"Recorded %i events.\" % (len(events)-1))\n logging.info(\"Metadata: %s\" % meta_data)\n logging.info(\"Saving to %s\" % filename)\n try:\n ds = Dataset(filename)\n ds.stats()\n ds.close()\n except Exception as e:\n logging.warning(\"Failed to print quick stats: %s\" % e)", "def write(self):\n \n hdulist = fits.HDUList()\n\n level0 = self.get_level0()\n hdulist.append(level0)\n \n level1 = self.get_level1()\n hdulist.append(level1)\n \n level2 = self.get_level2()\n hdulist.append(level2)\n \n level3 = self.get_level3()\n hdulist.append(level3)\n \n level4 = self.get_level4()\n hdulist.append(level4)\n \n hdulist.writeto(self.metadata_file,clobber=True)\n print('Output metadata to '+self.metadata_file)", "def write_to_file(self, struct, fName):\n\n f = h5py.File(fName, \"w\")\n self._recursive_write(f, struct)\n f.close()", "def write_to_file(self, filename):\n # check to see if the map exists from instantiation\n if hasattr(self, 'map'):\n sunpy_meta = self.map.meta\n\n psihdf.wrh5_meta(filename, self.x, self.y, np.array([]),\n self.data, chd_meta=self.info, sunpy_meta=sunpy_meta)", "def write_hdf5(self, hdf5_name):\n self.games.to_hdf('{}{}'.format(DATA_PATH, hdf5_name), 'table', append = False)", "def write_VCF_to_hdf5(VCF_dat, out_file):\n import h5py\n \n f = h5py.File(out_file, 'w')\n f.create_dataset(\"contigs\", data=np.string_(VCF_dat['contigs']), \n compression=\"gzip\", compression_opts=9)\n f.create_dataset(\"samples\", data=np.string_(VCF_dat['samples']), \n compression=\"gzip\", compression_opts=9)\n f.create_dataset(\"variants\", data=np.string_(VCF_dat['variants']), \n compression=\"gzip\", compression_opts=9)\n f.create_dataset(\"comments\", data=np.string_(VCF_dat['comments']), \n compression=\"gzip\", compression_opts=9)\n \n ## variant fixed information\n fixed = f.create_group(\"FixedINFO\")\n for _key in VCF_dat['FixedINFO']:\n fixed.create_dataset(_key, data=np.string_(VCF_dat['FixedINFO'][_key]), \n compression=\"gzip\", compression_opts=9)\n \n ## genotype information for each sample\n geno = f.create_group(\"GenoINFO\")\n for _key in VCF_dat['GenoINFO']:\n geno.create_dataset(_key, data=np.string_(VCF_dat['GenoINFO'][_key]), \n compression=\"gzip\", compression_opts=9)\n \n f.close()", "def save(self, filename):\n if (filename[-5:] != '.hmat'):\n filename += '.hmat'\n h5f = h5py.File(filename, 'w')\n h5f.create_dataset('matrix', data=self.matrix, compression = 'gzip', compression_opts=9)\n h5f.create_dataset('idx', data=self.idx, compression = 'gzip', compression_opts=9)\n h5f.create_dataset('applyedMethods', data=cPickle.dumps(self._applyedMethods))\n if hasattr(self,\"genome\") and 
hasattr(self,\"resolution\"):\n h5f.create_dataset('genome',data = cPickle.dumps(self.genome))\n h5f.create_dataset('resolution',data = cPickle.dumps(self.resolution))\n else:\n warnings.warn(\"No genome and resolution is specified, attributes are recommended for matrix.\")\n \n h5f.close()", "def write_10X_h5(filename, matrix, features, barcodes, genome='GRCh38', datatype='Peak'):\n\n f = h5py.File(filename, 'w')\n\n if datatype == 'Peak':\n\n M = sp_sparse.csc_matrix(matrix, dtype=numpy.int8)\n\n else:\n\n M = sp_sparse.csc_matrix(matrix, dtype=numpy.float32)\n\n B = numpy.array(barcodes, dtype='|S200')\n\n P = numpy.array(features, dtype='|S100')\n\n GM = numpy.array([genome] * len(features), dtype='|S10')\n\n FT = numpy.array([datatype] * len(features), dtype='|S100')\n\n AT = numpy.array(['genome'], dtype='|S10')\n\n mat = f.create_group('matrix')\n\n mat.create_dataset('barcodes', data=B)\n\n mat.create_dataset('data', data=M.data)\n\n mat.create_dataset('indices', data=M.indices)\n\n mat.create_dataset('indptr', data=M.indptr)\n\n mat.create_dataset('shape', data=M.shape)\n\n fet = mat.create_group('features')\n\n fet.create_dataset('_all_tag_keys', data=AT)\n\n fet.create_dataset('feature_type', data=FT)\n\n fet.create_dataset('genome', data=GM)\n\n fet.create_dataset('id', data=P)\n\n fet.create_dataset('name', data=P)\n\n f.close()", "def to_file(self, filename):\n assert self.standards # ensure preprocess_fingerprints() completed\n libver = self.settings['libver']\n with h5py.File(filename, 'w', libver=libver) as h5f:\n for m_name, attrs, m_dict in zip(\n self.m_names, self.m_attrs, self.all):\n path = 'Preprocessed/{}'.format(m_name)\n write_to_group(h5f, path, attrs, m_dict)\n\n scaling_standards = {'standard_{}'.format(j): standard\n for j, standard in enumerate(self.standards)}\n write_to_group(h5f, 'system',\n {'sys_elements': np.string_(self.sys_elements)},\n scaling_standards)", "def open_halo_output(self):\n \n \n try:\n self.halo_output_file = h5.File(self.HDF_output_filepath, \"w\")\n\n except OSError:\n for obj in gc.get_objects(): # Browse through ALL objects\n if isinstance(obj, h5.File): # Just HDF5 files\n try:\n obj.close()\n except:\n pass # Was already closed \n self.halo_output_file = h5.File(self.HDF_output_filepath, \"w\")\n\n self.halo_output_dataset = self.halo_output_file.create_dataset(\n \"halo_data\", (0,), maxshape=(None,), dtype=self.dtype_halo, compression=\"gzip\"\n )\n \n self.subhalo_output_dataset = self.halo_output_file.create_dataset(\n \"subhalo_data\", (0,), maxshape=(None,), dtype=self.subhalo_dtype, compression=\"gzip\"\n )\n \n return None", "def exportHDF5(self, fileName, groupName=None, writeSamples=False):\n # Check if already in hdf5 file first\n if os.path.exists(fileName):\n mode = \"a\"\n else:\n mode = \"w\"\n\n # Specify the root address for writing\n ROOT = \"/\"\n if groupName is None:\n baseAddress = ROOT\n else:\n baseAddress = os.path.join(ROOT, groupName)\n\n # And the data is stored in\n bootAddress = os.path.join(baseAddress, \"bootstrap\")\n\n # Open the HDF5 file\n with h5py.File(fileName, mode) as f:\n # Check whether group already exists\n baseGroup = f.get(baseAddress)\n if baseGroup is None:\n baseGroup = f.create_group(baseAddress)\n # Check wether bootstrap data exists\n if \"bootstrap\" in baseGroup.keys():\n raise KeyError(\n \"Group >bootstrap< already exist for base group >{}<. 
Stop writing\".format(\n groupName\n ),\n groupName\n )\n bootGroup = f.create_group(bootAddress)\n\n # Now write parameters\n for key, val in self.parameters.items():\n bootGroup.create_dataset(key, data=val)\n # Write indices\n bootGroup.create_dataset(\"indices\", data=self.indices)\n # Write samples if requested\n if writeSamples:\n bootGroup.create_dataset(\"samples\", data=self.samples)", "def write_hdf5(data, filename):\n import h5py as hp\n import numpy as np\n hfile = hp.File(filename, 'w')\n typ = type(data)\n if typ == dict:\n for k in data.iterkeys():\n # The straight code gives ustrings, which I don't like.\n# hfile[k] = data[k]\n exec(\"hfile['\" + k + \"'] = data['\" + k + \"']\")\n elif typ == np.ndarray:\n hfile['data'] = data\n hfile.close()", "def save_to_hdf5(h5group, obj, path='/'):\n return Hdf5Saver(h5group).save(obj, path)", "def mat2h5(config):\n dataset_name = config.dataset_name\n base_path = config.data_path\n mat_dir = os.path.join(base_path, 'data_mat')\n h5_dir = os.path.join(base_path, 'data_h5')\n if dataset_name == 'Salinas':\n dataset_mat_dir = os.path.join(mat_dir, '{name}/{name}_corrected.mat'.format(name=dataset_name))\n dataset_gt_dir = os.path.join(mat_dir, '{name}/{name}_gt.mat'.format(name=dataset_name))\n dataset_h5_save_dir = os.path.join(h5_dir, '{}.h5'.format(dataset_name))\n elif dataset_name == 'Indian':\n dataset_mat_dir = os.path.join(mat_dir, '{name}/{name}_pines_corrected.mat'.format(name=dataset_name))\n dataset_gt_dir = os.path.join(mat_dir, '{name}/{name}_pines_gt.mat'.format(name=dataset_name))\n dataset_h5_save_dir = os.path.join(h5_dir, '{}.h5'.format(dataset_name))\n elif dataset_name == 'WHU_Hi_HongHu':\n dataset_mat_dir = os.path.join(mat_dir, '{name}/{name}.mat'.format(name=dataset_name))\n dataset_gt_dir = os.path.join(mat_dir, '{name}/{name}_gt.mat'.format(name=dataset_name))\n dataset_h5_save_dir = os.path.join(h5_dir, '{}.h5'.format(dataset_name))\n hsi_data = sio.loadmat(dataset_mat_dir)[config.dataset_HSI]\n hsi_gt = sio.loadmat(dataset_gt_dir)[config.dataset_gt]\n with h5py.File(dataset_h5_save_dir, 'w') as f:\n f['data'] = hsi_data\n f['label'] = hsi_gt", "def saveHDF(self, filename, dataset, data, metadata_func=None):\n with h5py.File(filename, 'r+') as f:\n f.create_dataset(dataset, data=data)\n f[dataset].attrs['mode'] = self._name\n f[dataset].attrs['exposure'] = self._cam.exposure\n f[dataset].attrs['em_gain'] = self._cam.EM._read_gain_from_camera()\n f[dataset].attrs['created'] = time.strftime(\"%d/%m/%Y %H:%M:%S\")\n if metadata_func is not None:\n metadata_func(f[dataset])", "def temp_emsoft_h5ebsd_file(tmpdir, request):\n f = File(tmpdir.join(\"emsoft_h5ebsd_file.h5\"), mode=\"w\")\n\n # Unpack parameters\n map_shape, (dy, dx), example_rotations, n_top_matches, refined = request.param\n ny, nx = map_shape\n map_size = ny * nx\n\n # Create groups used in reader\n ebsd_group = f.create_group(\"Scan 1/EBSD\")\n data_group = ebsd_group.create_group(\"Data\")\n header_group = ebsd_group.create_group(\"Header\")\n phase_group = header_group.create_group(\"Phase/1\") # Always single phase\n\n # Create `header_group` datasets used in reader\n for name, data, dtype in zip(\n [\"nRows\", \"nColumns\", \"Step Y\", \"Step X\"],\n [ny, nx, dy, dx],\n [np.int32, np.int32, np.float32, np.float32],\n ):\n header_group.create_dataset(name, data=np.array([data], dtype=dtype))\n\n # Create `data_group` datasets, mostly quality metrics\n data_group.create_dataset(\"X Position\", data=np.tile(np.arange(nx) * dx, ny))\n # Note that 
\"Y Position\" is wrongly written to their h5ebsd file by EMsoft\n data_group.create_dataset(\n \"Y Position\",\n data=np.tile(np.arange(nx) * dx, ny), # Wrong\n # data=np.sort(np.tile(np.arange(ny) * dy, nx)), # Correct\n )\n for name, shape, dtype in [\n (\"AvDotProductMap\", map_shape, np.int32),\n (\"CI\", map_size, np.float32),\n (\"CIMap\", map_shape, np.int32),\n (\"IQ\", map_size, np.float32),\n (\"IQMap\", map_shape, np.int32),\n (\"ISM\", map_size, np.float32),\n (\"ISMap\", map_shape, np.int32),\n (\"KAM\", map_shape, np.float32),\n (\"OSM\", map_shape, np.float32),\n (\"Phase\", map_size, np.uint8),\n ]:\n data_group.create_dataset(name, data=np.zeros(shape, dtype=dtype))\n\n # `data_group` with rotations\n # Sample as many rotations from `rotations` as `map_size`\n rot_idx = np.random.choice(np.arange(len(example_rotations)), map_size)\n rot = example_rotations[rot_idx]\n n_sampled_oris = 333227 # Cubic space group with Ncubochoric = 100\n data_group.create_dataset(\"FZcnt\", data=np.array([n_sampled_oris], dtype=np.int32))\n data_group.create_dataset(\n \"TopMatchIndices\",\n data=np.vstack(\n (np.random.choice(np.arange(n_sampled_oris), n_top_matches),) * map_size\n ),\n dtype=np.int32,\n )\n data_group.create_dataset(\n \"TopDotProductList\",\n data=np.vstack((np.random.random(size=n_top_matches),) * map_size),\n dtype=np.float32,\n )\n data_group.create_dataset(\n \"DictionaryEulerAngles\",\n data=np.column_stack(\n (np.random.uniform(low=0, high=2 * np.pi, size=n_sampled_oris),) * 3\n ),\n dtype=np.float32,\n )\n\n if refined:\n data_group.create_dataset(\"RefinedEulerAngles\", data=rot.astype(np.float32))\n data_group.create_dataset(\n \"RefinedDotProducts\", data=np.zeros(map_size, dtype=np.float32)\n )\n\n # Number of top matches kept\n f.create_dataset(\n \"NMLparameters/EBSDIndexingNameListType/nnk\",\n data=np.array([n_top_matches], dtype=np.int32),\n )\n\n # `phase_group`\n for name, data in [\n (\"Point Group\", \"Cubic (Oh) [m3m]\"),\n (\"MaterialName\", \"austenite/austenite\"),\n (\"Lattice Constant a\", \"3.595\"),\n (\"Lattice Constant b\", \"3.595\"),\n (\"Lattice Constant c\", \"3.595\"),\n (\"Lattice Constant alpha\", \"90.000\"),\n (\"Lattice Constant beta\", \"90.000\"),\n (\"Lattice Constant gamma\", \"90.000\"),\n ]:\n phase_group.create_dataset(name, data=np.array([data], dtype=np.dtype(\"S\")))\n\n yield f\n gc.collect()", "def saveToHDF5(self,Hdf5Group):\n idx=0\n for cm in self:\n subgroup = Hdf5Group.create_group('%05d'%idx)\n subgroup.attrs['index']=idx\n cm.saveToHDF5(subgroup)\n idx+=1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a microstructure object from an HDF5 file.
def from_h5(file_path):
    with h5py.File(file_path, 'r') as f:
        micro = Microstructure(name=f.attrs['microstructure_name'])
        if 'symmetry' in f['EnsembleData/CrystalStructure'].attrs:
            sym = f['EnsembleData/CrystalStructure'].attrs['symmetry']
            parameters = f['EnsembleData/CrystalStructure/LatticeParameters'][()]
            micro.set_lattice(Lattice.from_symmetry(Symmetry.from_string(sym), parameters))
        if 'data_dir' in f.attrs:
            micro.data_dir = f.attrs['data_dir']
        # load feature data
        if 'R_vectors' in f['FeatureData']:
            print('some grains')
            avg_rods = f['FeatureData/R_vectors'][()]
            print(avg_rods.shape)
            if 'grain_ids' in f['FeatureData']:
                grain_ids = f['FeatureData/grain_ids'][()]
            else:
                grain_ids = range(1, 1 + avg_rods.shape[0])
            if 'centers' in f['FeatureData']:
                centers = f['FeatureData/centers'][()]
            else:
                centers = np.zeros_like(avg_rods)
            for i in range(avg_rods.shape[0]):
                g = Grain(grain_ids[i], Orientation.from_rodrigues(avg_rods[i, :]))
                g.center = centers[i]
                micro.grains.append(g)
        # load cell data
        if 'grain_ids' in f['CellData']:
            micro.grain_map = f['CellData/grain_ids'][()]
            if 'voxel_size' in f['CellData/grain_ids'].attrs:
                micro.voxel_size = f['CellData/grain_ids'].attrs['voxel_size']
        if 'mask' in f['CellData']:
            micro.mask = f['CellData/mask'][()]
            if 'voxel_size' in f['CellData/mask'].attrs:
                micro.voxel_size = f['CellData/mask'].attrs['voxel_size']
        return micro
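A minimal round-trip sketch, assuming from_h5 is exposed as a static helper on the Microstructure class and that the file was produced by to_h5 above (the file name is illustrative):

# Hypothetical example: read back a previously written file.
micro = Microstructure.from_h5('steel_sample.h5')
print('loaded %d grains from %s' % (len(micro.grains), micro.name))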
[ "def load_from_hdf5(obj, fname, obj_name):\n\n h5 = h5py.File(fname, 'r')\n root_grp = h5[obj_name]\n\n load_object(root_grp, obj, obj_name)\n h5.close()", "def load_h5(self):\n path = os.path.join(self.directory, self.filename)\n self.h5file = tb.open_file(path, mode=self.mode)\n self.root = self.h5file.get_node(self.root_name)", "def load_h5(f, path):\n # Sparse array.\n if f.has_attr(path, 'sparse_type'):\n if f.read_attr(path, 'sparse_type') == 'csr':\n return SparseCSR.load_h5(f, path)\n else:\n raise NotImplementedError(\"Only SparseCSR arrays are implemented \"\n \"currently.\")\n # Regular dense dataset.\n else:\n return f.read(path)[...]", "def load(self, h5):\n # open file for reading\n if isinstance(h5, str):\n h5 = h5py.File(h5, 'r')\n \n # load features\n self._positives = torch.from_numpy(h5[\"Features\"][:])\n \n # load extreme vectors\n e = h5['ExtremeVectors']\n obj = dict(Scale=torch.from_numpy(e['scale'][()]),Shape = torch.from_numpy(e['shape'][()]),signTensor = torch.tensor(e['sign'][()]),translateAmountTensor = torch.LongTensor(e['translateAmount'][()]),smallScoreTensor = torch.from_numpy(e['smallScore'][()]))\n self._extreme_vectors = weibull.weibull(obj)\n self._extreme_vectors_indexes = torch.tensor(e['indexes'][()])\n\n cv = []\n # load covered indices\n for i in range(len(self._extreme_vectors_indexes)):\n cv.append(torch.from_numpy(numpy.array(e['CoveredVectors/'+str(i)][()])))\n self._covered_vectors = cv\n \n # load other parameteres\n self.distance_function = e.attrs[\"Distance\"]\n self.tailsize = e.attrs[\"Tailsize\"]\n self._label = e.attrs[\"Label\"]\n if self._label == -1: self._label = None\n self.cover_threshold = e.attrs[\"CoverThreshold\"]\n if self.cover_threshold == -1.: self.cover_threshold = None", "def read_h5_file(self, h5file_path):\n f = h5py.File(h5file_path, 'r')\n return f['wav']", "def loadH5Parts(*args, **kwds):\n \n return io.loadH5Parts(*args, **kwds)", "def read_hdf5(filename):\n import h5py as hp\n hfile = hp.File(filename, 'r')\n lenk = len(hfile.keys())\n if lenk == 1:\n data = hfile[hfile.keys()[0]].value\n else:\n data = {}\n for k in hfile.iterkeys():\n # The straight code gives ustrings, which I don't like.\n# data[k] = hfile[k].value\n exec(\"data['\" + k + \"'] = hfile['\" + k + \"'].value\")\n hfile.close()\n return data", "def load_hdf5(self, path):\n f = tables.open_file(os.path.join(path, 'vectors.h5p'), 'r')\n self.matrix = f.root.vectors.read()\n self.vocabulary = Vocabulary()\n self.vocabulary.load(path)\n # self.name += os.path.basename(os.path.normpath(path))\n f.close()", "def dendro_import_hdf5(filename):\n import h5py\n from ..dendrogram import Dendrogram\n from ..structure import Structure\n h5f = h5py.File(filename, 'r')\n d = Dendrogram()\n d.n_dim = h5f.attrs['n_dim']\n d.data = h5f['data'].value\n d.index_map = h5f['index_map'].value\n d.nodes_dict = {}\n\n flux_by_node = {}\n indices_by_node = {}\n\n def _construct_tree(repr):\n nodes = []\n for idx in repr:\n node_indices = indices_by_node[idx]\n f = flux_by_node[idx]\n if type(repr[idx]) == tuple:\n sub_nodes_repr = repr[idx][0] # Parsed representation of sub nodes\n sub_nodes = _construct_tree(sub_nodes_repr)\n for i in sub_nodes:\n d.nodes_dict[i.idx] = i\n b = Structure(node_indices, f, children=sub_nodes, idx=idx)\n # Correct merge levels - complicated because of the\n # order in which we are building the tree.\n # What we do is look at the heights of this branch's\n # 1st child as stored in the newick representation, and then\n # work backwards to 
compute the merge level of this branch\n first_child_repr = sub_nodes_repr.itervalues().next()\n if type(first_child_repr) == tuple:\n height = first_child_repr[1]\n else:\n height = first_child_repr\n d.nodes_dict[idx] = b\n nodes.append(b)\n else:\n l = Structure(node_indices, f, idx=idx)\n nodes.append(l)\n d.nodes_dict[idx] = l\n return nodes\n\n # Do a fast iteration through d.data, adding the indices and data values\n # to the two dictionaries declared above:\n indices = np.indices(d.data.shape).reshape(d.data.ndim, np.prod(d.data.shape)).transpose()\n\n for coord in indices:\n coord = tuple(coord)\n idx = d.index_map[coord]\n if idx:\n try:\n flux_by_node[idx].append(d.data[coord])\n indices_by_node[idx].append(coord)\n except KeyError:\n flux_by_node[idx] = [d.data[coord]]\n indices_by_node[idx] = [coord]\n\n d.trunk = _construct_tree(_parse_newick(h5f['newick'].value))\n # To make the node.level property fast, we ensure all the items in the\n # trunk have their level cached as \"0\"\n for node in d.trunk:\n node._level = 0 # See the @property level() definition in structure.py\n\n return d", "def loadFile(self, fileName,verbose=False):\n if (os.path.isabs(fileName)):\n self.fileName = os.path.basename(fileName)\n self.fullFileName = fileName\n else:\n self.fileName = fileName\n # make the full file name by joining the input name \n # to the MKID_RAW_PATH (or . if the environment variable \n # is not defined)\n dataDir = os.getenv('MKID_RAW_PATH', '/')\n self.fullFileName = os.path.join(dataDir, self.fileName)\n\n if (not os.path.exists(self.fullFileName)):\n msg='file does not exist: %s'%self.fullFileName\n if verbose:\n print msg\n raise Exception(msg)\n \n #open the hdf5 file\n self.file = tables.open_file(self.fullFileName, mode='r')\n\n ##### TO DO/DELETE #####\n # dark obs files have no header currently (SRM 2017-05-05)\n # can update later by foldingn log files into obs file generation somehow\n # header is currently not used anywhere else in the code anyways. Maybe can just trash this.\n '''\n self.header = self.file.root.header.header\n self.titles = self.header.colnames\n try:\n self.info = self.header[0] #header is a table with one row\n except IndexError as inst:\n if verbose:\n print 'Can\\'t read header for ',self.fullFileName\n raise inst\n '''\n\n # Useful information about data format set here.\n # For now, set all of these as constants.\n # If we get data taken with different parameters, straighten\n # that all out here.\n\n ##### TO DELETE? #####\n ## These parameters are for DARKNESS data\n # May be cleared out later if deprecated (SRM 2017-05-05)\n self.tickDuration = 1e-6 #s\n self.ticksPerSec = int(1.0 / self.tickDuration)\n self.intervalAll = interval[0.0, (1.0 / self.tickDuration) - 1]\n\n\n ##### TO DELETE #####\n # Did not do this in DARKNESS. nonAllocPixels were just flagged in beammap\n # but still assigned a unique location. 
Correct method will be with beam map flags \n #self.nonAllocPixelName = '/r0/p250/'\n\n\n #get the beam image.\n try:\n self.beamImage = self.file.get_node('/BeamMap/Map').read()\n self.beamMapFlags = self.file.get_node('/BeamMap/Flag').read()\n except Exception as inst:\n if verbose:\n print 'Can\\'t access beamimage for ',self.fullFileName\n raise inst\n\n ##### TO DELETE #####\n # dark obs files have pixels ID'd by resID now, not roach/pixel address\n # Do we need these beamImageRoaches or beamImagePixelNums later?\n '''\n #format for a pixelName in beamImage is /r#/p#/t# where r# is the roach number, p# is the pixel number\n # and t# is the starting timestamp\n self.beamImageRoaches = np.array([[int(s.split('r')[1].split('/')[0]) for s in row] for row in self.beamImage])\n self.beamImagePixelNums = np.array([[int(s.split('p')[1].split('/')[0]) for s in row] for row in self.beamImage])\n '''\n #instead of beamImagePixelNums, we alternatively use beamImagePixelIDs\n #simply the beamImage cast to integer data types from strings\n self.beamImagePixelIDs = np.array(self.beamImage, dtype=int)\n\n #get shape of array from beamImage\n beamShape = self.beamImage.shape\n self.nRow = beamShape[0]\n self.nCol = beamShape[1]\n\n #make pointer to data table\n self.data = self.file.root.Photons.data\n\n #easy way to check exactly how many seconds of data are supposedly recorded\n self.totalIntegrationTime = self.file.root.Images._g_getnchildren()", "def from_neper(neper_file_path):\n neper_file = neper_file_path.split(os.sep)[-1]\n print('creating microstructure from Neper tesselation %s' % neper_file)\n name, ext = os.path.splitext(neper_file)\n print(name, ext)\n assert ext == '.tesr' # assuming raster tesselation\n micro = Microstructure(name=name)\n with open(neper_file_path, 'r', encoding='latin-1') as f:\n line = f.readline() # ***tesr\n # look for **general\n while True:\n line = f.readline().strip() # get rid of unnecessary spaces\n if line.startswith('**general'):\n break\n dim = f.readline().strip()\n print(dim)\n dims = np.array(f.readline().split()).astype(int).tolist()\n print(dims)\n voxel_size = np.array(f.readline().split()).astype(float).tolist()\n print(voxel_size)\n # look for **cell\n while True:\n line = f.readline().strip()\n if line.startswith('**cell'):\n break\n n = int(f.readline().strip())\n print('microstructure contains %d grains' % n)\n f.readline() # *id\n grain_ids = []\n # look for *ori\n while True:\n line = f.readline().strip()\n if line.startswith('*ori'):\n break\n else:\n grain_ids.extend(np.array(line.split()).astype(int).tolist())\n print('grain ids are:', grain_ids)\n oridescriptor = f.readline().strip() # must be euler-bunge:passive\n if oridescriptor != 'euler-bunge:passive':\n print('Wrong orientation descriptor: %s, must be euler-bunge:passive' % oridescriptor)\n for i in range(n):\n euler_angles = np.array(f.readline().split()).astype(float).tolist()\n print('adding grain %d' % grain_ids[i])\n micro.grains.append(Grain(grain_ids[i], Orientation.from_euler(euler_angles)))\n # look for **data\n while True:\n line = f.readline().strip()\n if line.startswith('**data'):\n break\n print(f.tell())\n print('reading data from byte %d' % f.tell())\n data = np.fromfile(f, dtype=np.uint16)[:-4] # leave out the last 4 values\n print(data.shape)\n assert np.prod(dims) == data.shape[0]\n micro.set_grain_map(data.reshape(dims[::-1]).transpose(2, 1, 0), voxel_size[0]) # swap X/Z axes\n micro.recompute_grain_centers()\n print('done')\n return micro", "def 
load(hdf5_filename):\n # Expand filename to be absolute\n hdf5_filename = os.path.expanduser(hdf5_filename)\n\n try:\n f = h5py.File(hdf5_filename, \"r\")\n # neurodata stores data inside the 'cutout' h5 dataset\n data_layers = f.get('image').get('CUTOUT')\n except Exception as e:\n raise ValueError(\"Could not load file {0} for conversion. {}\".format(\n hdf5_filename, e))\n raise\n\n return numpy.array(data_layers)", "def H5ReadDataset(filename, node):\n f = h5py.File(filename, 'r')\n if node not in f:\n raise H5IndexError(node, [], 'Cannot index %s' % node)\n dataset = f[node].value\n f.close()\n return dataset", "def read(fd: BinaryIO) -> Entity:\n if fd.read(4) != b'MUGS':\n raise ValueError(\"not a valid mug file format\")\n\n return read_recursive(fd)", "def read_hdf5_file(self, file_name):\n # if file_name.endswith('.hdf5'):\n stat_file = h5py.File(config.stat_dir+'stats.hdf5', mode='r')\n\n max_feat = np.array(stat_file[\"feats_maximus\"])\n min_feat = np.array(stat_file[\"feats_minimus\"])\n stat_file.close()\n\n with h5py.File(config.voice_dir + file_name) as feat_file:\n\n feats = np.array(feat_file['feats'])[()]\n\n pho_target = np.array(feat_file[\"phonemes\"])[()]\n\n f0 = feats[:,-2]\n\n med = np.median(f0[f0 > 0])\n\n f0[f0==0] = med\n\n f0_nor = (f0 - min_feat[-2])/(max_feat[-2]-min_feat[-2])\n\n\n return feats, f0_nor, pho_target", "def _read_molly_head(mf):\n\n # If 'fbytes' in the next line comes up blank, we have reached the end of\n # the file\n fbytes = mf.read(4)\n if fbytes == '': return None\n\n # If it does not start with 44 in either big or little endian form,\n # something is wrong\n (nbyte,) = struct.unpack('<i', fbytes)\n if nbyte != 44:\n (nbyte,) = struct.unpack('>i', fbytes)\n if nbyte != 44:\n raise MollyError('_read_molly_header: not a molly spectrum: first 4 bytes = ' + str(nbyte) + ' not 44')\n border = '>'\n else:\n border = '<'\n\n # Read first line with various format items\n try:\n fcode,units,npix,narc,nchar,ndoub,nint,nfloat = \\\n struct.unpack(border + 'i16s6i',mf.read(44))\n except:\n raise MollyError(\"Failed to read first line of molly spectrum\")\n\n # skip bytes at end of first record and at start of second\n mf.seek(8,1)\n\n # read names of string header items\n cnames = []\n for i in range(nchar):\n name = mf.read(16).strip()\n cnames.append(name)\n\n # read names of double header items\n dnames = []\n for i in range(ndoub):\n name = mf.read(16).strip()\n dnames.append(name)\n\n # read names of integer header items\n inames = []\n for i in range(nint):\n name = mf.read(16).strip()\n inames.append(name)\n\n # read names of float header items\n fnames = []\n for i in range(nfloat):\n name = mf.read(16).strip()\n fnames.append(name)\n\n # skip bytes at end of second record and at start of third\n mf.seek(8,1)\n\n # create header\n head = fits.Header()\n\n for i in range(nchar):\n value = mf.read(32).strip()\n head['hierarch ' + cnames[i]] = value\n\n dvals = struct.unpack(border + str(ndoub) + 'd', mf.read(8*ndoub))\n for i in range(ndoub):\n head['hierarch ' + dnames[i]] = dvals[i]\n\n ivals = struct.unpack(border + str(nint) + 'i', mf.read(4*nint))\n for i in range(nint):\n head['hierarch ' + inames[i]] = ivals[i]\n\n fvals = struct.unpack(border + str(nfloat) + 'f', mf.read(4*nfloat))\n for i in range(nfloat):\n head['hierarch ' + fnames[i]] = np.float32(fvals[i])\n\n # skip bytes at end of third record and at start of fourth\n mf.seek(8,1)\n\n # set X array\n if narc != 0:\n arc = np.fromfile(file=mf, dtype=border + 'f8', 
count=abs(narc))\n x = np.polyval(arc[::-1], np.arange(1.,npix+1,1.)/npix)\n if narc < 0:\n x = np.exp(x)\n # correct to heliocentric scale\n if 'Vearth' in head:\n x *= (1.-head['Vearth']/(subs.C/1000.))\n head['comment'] = 'Wavelength scale is heliocentric'\n else:\n head['comment'] = 'Wavelength scale is possibly telluric'\n else:\n x = np.arange(1.,npix+1,1.)\n arc = None\n\n # skip 4 bytes at end of headers\n mf.seek(4,1)\n\n return (fcode, head, dnl.Axis('Wavelength', '\\A', x), narc, arc, border)", "def load_from_hdf5(h5group, path=None, ignore_unknown=True, exclude=None):\n return Hdf5Loader(h5group, ignore_unknown, exclude).load(path)", "def metadata_from_hdf5_file(h5py_filename, delete_afterwards=True):\n metadata = None\n open_mode = \"a\" if delete_afterwards else \"r\"\n with h5py.File(h5py_filename, mode=open_mode) as h5f:\n if 'data' in h5f:\n main_dset = h5f['data']\n if main_dset.is_virtual:\n metadata_list = list() # A list of dicts.\n\n # Now open the virtual sources and check there for the metadata.\n for source_tpl in main_dset.virtual_sources():\n with h5py.File(source_tpl.file_name, mode=open_mode) as h5f_virtual_part:\n if 'metadata' in h5f_virtual_part:\n virtual_metadata_grp = h5f_virtual_part['metadata']\n metadata_list.append(extract_md_group(virtual_metadata_grp))\n if delete_afterwards:\n del h5f_virtual_part['metadata']\n metadata = _merge_md_list(metadata_list)\n else:\n # the main_dset is not virtual, so just grab the metadata group from the file root.\n if 'metadata' in h5f:\n metadata = extract_md_group(h5f['metadata'])\n if delete_afterwards:\n del h5f['metadata']\n else:\n raise SPYValueError(\"'data' dataset in hd5f file {of}.\".format(of=h5py_filename), actual=\"no such dataset\")\n return metadata", "def read_dense_net(hdf5_file_name):\n\n return tf_keras.models.load_model(\n hdf5_file_name, custom_objects=METRIC_FUNCTION_DICT\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a microstructure from a Neper tessellation. Neper is an open-source program to generate polycrystalline microstructures using Voronoi tessellations.
def from_neper(neper_file_path):
    neper_file = neper_file_path.split(os.sep)[-1]
    print('creating microstructure from Neper tesselation %s' % neper_file)
    name, ext = os.path.splitext(neper_file)
    print(name, ext)
    assert ext == '.tesr'  # assuming raster tesselation
    micro = Microstructure(name=name)
    with open(neper_file_path, 'r', encoding='latin-1') as f:
        line = f.readline()  # ***tesr
        # look for **general
        while True:
            line = f.readline().strip()  # get rid of unnecessary spaces
            if line.startswith('**general'):
                break
        dim = f.readline().strip()
        print(dim)
        dims = np.array(f.readline().split()).astype(int).tolist()
        print(dims)
        voxel_size = np.array(f.readline().split()).astype(float).tolist()
        print(voxel_size)
        # look for **cell
        while True:
            line = f.readline().strip()
            if line.startswith('**cell'):
                break
        n = int(f.readline().strip())
        print('microstructure contains %d grains' % n)
        f.readline()  # *id
        grain_ids = []
        # look for *ori
        while True:
            line = f.readline().strip()
            if line.startswith('*ori'):
                break
            else:
                grain_ids.extend(np.array(line.split()).astype(int).tolist())
        print('grain ids are:', grain_ids)
        oridescriptor = f.readline().strip()  # must be euler-bunge:passive
        if oridescriptor != 'euler-bunge:passive':
            print('Wrong orientation descriptor: %s, must be euler-bunge:passive' % oridescriptor)
        for i in range(n):
            euler_angles = np.array(f.readline().split()).astype(float).tolist()
            print('adding grain %d' % grain_ids[i])
            micro.grains.append(Grain(grain_ids[i], Orientation.from_euler(euler_angles)))
        # look for **data
        while True:
            line = f.readline().strip()
            if line.startswith('**data'):
                break
        print(f.tell())
        print('reading data from byte %d' % f.tell())
        data = np.fromfile(f, dtype=np.uint16)[:-4]  # leave out the last 4 values
        print(data.shape)
        assert np.prod(dims) == data.shape[0]
    micro.set_grain_map(data.reshape(dims[::-1]).transpose(2, 1, 0), voxel_size[0])  # swap X/Z axes
    micro.recompute_grain_centers()
    print('done')
    return micro
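A minimal usage sketch of the loader above (the module path, the .tesr file name, and the grain_map attribute access are assumptions drawn from the snippets in this record, not a verified API):

# hedged sketch: load a raster tessellation previously generated with Neper
from pymicro.crystal.microstructure import Microstructure  # assumed module path

micro = Microstructure.from_neper('poly_n100.tesr')  # hypothetical .tesr file
print(len(micro.grains))         # grains parsed from the *ori section
print(micro.grain_map.shape)     # labeled image stored by set_grain_map, shape follows the **general dims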
[ "def create_schematic_from_mentor_netlist(self, file_to_import):\n xpos = 0\n ypos = 0\n delta = 0.0508\n use_instance = True\n my_netlist = []\n with open(file_to_import, 'r') as f:\n for line in f:\n my_netlist.append(line.split(\" \"))\n nets = [i for i in my_netlist if i[0] == \"NET\"]\n comps = [i for i in my_netlist if i[0] == \"COMP:\"]\n props = {}\n for el in my_netlist:\n if el[0] == \"COMP:\":\n n = el[2].strip()\n n = n[1:-1]\n props[n] = []\n i = my_netlist.index(el) + 1\n finished = False\n while not finished and i < len(my_netlist):\n if my_netlist[i][0] == \"Property:\":\n props[n].append(my_netlist[i][1])\n elif \"Pin:\" in my_netlist[i]:\n props[n].append(my_netlist[i])\n else:\n finished = True\n i += 1\n\n column_number = int(math.sqrt(len(comps)))\n for el in comps:\n name = el[2].strip() # Remove carriage return.\n name = name[1:-1] # Remove quotes.\n if len(el) > 3:\n comptype = el[3]\n else:\n comptype = self.retrieve_mentor_comp(el[2])\n value = \"required\"\n for prop in props[name]:\n if \"Value=\" in prop:\n value = prop.split(\"=\")[1].replace(\",\", \".\").strip()\n\n mycomp = None\n if \"resistor:RES.\" in comptype:\n mycomp, mycompname = self.modeler.components.create_resistor(name, value, xpos, ypos,\n use_instance_id_netlist=use_instance)\n elif \"inductor:COIL.\" in comptype:\n mycomp, mycompname = self.modeler.components.create_inductor(name, value, xpos, ypos,\n use_instance_id_netlist=use_instance)\n elif \"capacitor:CAP.\" in comptype:\n mycomp, mycompname = self.modeler.components.create_capacitor(name, value, xpos, ypos,\n use_instance_id_netlist=use_instance)\n elif \"transistor:NPN\" in comptype:\n mycomp, mycompname = self.modeler.components.create_npn(name, value, xpos, ypos,\n use_instance_id_netlist=use_instance)\n elif \"transistor:PNP\" in comptype:\n mycomp, mycompname = self.modeler.components.create_pnp(name, value, xpos, ypos,\n use_instance_id_netlist=use_instance)\n elif \"diode:\" in comptype:\n mycomp, mycompname = self.modeler.components.create_diode(name, value, xpos, ypos,\n use_instance_id_netlist=use_instance)\n\n if mycomp:\n pins = self.modeler.components.get_pins(mycomp)\n id = 1\n for pin in pins:\n pos = self.modeler.components.get_pin_location(mycomp, pin)\n if pos[0] < xpos:\n angle = 6.28318530717959\n else:\n angle = 3.14159265358979\n netname = None\n for net in nets:\n net = [i.strip() for i in net]\n if (name + \"-\" + str(id)) in net:\n fullnetname = net[2]\n netnames = fullnetname.split(\"/\")\n netname = netnames[len(netnames) - 1].replace(\",\", \"_\").replace(\"'\", \"\").replace(\"$\",\n \"\").strip()\n if not netname:\n prop = props[name]\n if \"Pin:\" in prop and id in prop:\n netname = prop[-1]\n netname = netname.replace(\"$\", \"\")\n\n if netname:\n self.modeler.components.create_page_port(netname, pos[0], pos[1], angle)\n else:\n self._messenger.add_info_message(\"Page Port Not Created\", \"Global\")\n id += 1\n ypos += delta\n if ypos > delta * (column_number):\n xpos += delta\n ypos = 0\n\n for el in nets:\n netname = el[2][1:-1]\n netname = netname.replace(\"$\", \"\")\n if \"GND\" in netname.upper():\n self.modeler.components.create_gnd(xpos, ypos)\n page_pos = ypos + 0.00254\n id, name = self.modeler.components.create_page_port(\n netname, xpos, ypos, 6.28318530717959)\n mod1 = self.modeler.components[id]\n mod1.set_location(str(xpos) + \"meter\", str(page_pos) + \"meter\")\n ypos += delta\n if ypos > delta * column_number:\n xpos += delta\n ypos = 0\n\n return True", "def 
voronoi_tessellation(x,y,mol,lx,ly,natoms,nmol):\n ### generate input script for voro++\n ofile = open('voro.data', 'w')\n for i in range(natoms):\n ofile.write(str(i) + ' ' + str(x[i]) + ' ' + str(y[i]) + ' 0.5\\n')\n ofile.close()\n ### perform Voronoi tessellation using voro++\n os.system('/usr/users/iff_th2/isele/Applications/voro++-0.4.6/src/voro++ -p 0.0 ' + str(lx) + ' 0.0 ' + str(ly) + ' 0.0 1.0 voro.data')\n ### read in the results\n vol = np.zeros((nmol))\n ifile = open('voro.data.vol')\n for i in range(natoms):\n line = ifile.readline()\n line = line.split()\n idx = int(line[0])\n v = float(line[4])\n vol[mol[idx]] += v\n ifile.close()\n ### remove voro++ files\n os.system('rm voro.data voro.data.vol')\n return vol", "def construct_d_vine(nodes):", "def _make_model(n_poles, c_m, p_m, p_l_1, p_l_2, p_r, dt):\n ######## Hacky modification to change dynamical system parameters\n xml_string = read_model('cartpole.xml').decode(\"utf-8\")\n id_timestep = ([m.start() for m in re.finditer('timestep=', xml_string)])\n id_length = ([m.start() for m in re.finditer('fromto=', xml_string)])\n ids_mass = ([m.start() for m in re.finditer('mass=', xml_string)])\n new_xml_string = xml_string[:id_timestep[0]] + \"timestep=\\\"\" + str(dt) + \"\\\"\" + xml_string[\n id_timestep[0] + 14:id_length[\n 0]] + \"fromto=\\\"0 0 0 0 0 \" + str(\n p_l_1) + \"\\\" size=\\\"\" + str(p_r) + \"\\\"\" + xml_string[id_length[0] + 33:ids_mass[\n 0]] + \"mass=\\\"\" + str(p_m) + \"\\\"\" + xml_string[\n ids_mass[0] + 9:ids_mass[1]] + \"mass=\\\"\" + str(\n c_m) + \"\\\"\" + xml_string[\n ids_mass[1] + 8:]\n xml_string = new_xml_string\n if n_poles == 1:\n return xml_string\n mjcf = etree.fromstring(xml_string)\n parent = mjcf.find('./worldbody/body/body') # Find first pole.\n # Make chain of poles.\n for pole_index in range(2, n_poles + 1):\n child = etree.Element('body', name='pole_{}'.format(pole_index),\n pos='0 0 ' + str(p_l_1), childclass='pole')\n etree.SubElement(child, 'joint', name='hinge_{}'.format(pole_index))\n etree.SubElement(child, 'geom', name='pole_{}'.format(pole_index), fromto=\"0 0 0 0 0 \" + str(p_l_2))\n parent.append(child)\n parent = child\n\n # Move plane down.\n #floor = mjcf.find('./worldbody/geom')\n #floor.set('pos', '0 0 {}'.format(1 - n_poles - .05))\n # Move cameras back.\n cameras = mjcf.findall('./worldbody/camera')\n cameras[0].set('pos', '0 {} 1'.format(-1 - 2 * n_poles))\n cameras[1].set('pos', '0 {} 2'.format(-2 * n_poles))\n result = etree.tostring(mjcf, pretty_print=True)\n # print(result)\n return result", "def mixed_type_ug():\n points = array([[0,0,0], [1,0,0], [0,1,0], [0,0,1], # tetra\n [2,0,0], [3,0,0], [3,1,0], [2,1,0],\n [2,0,1], [3,0,1], [3,1,1], [2,1,1], # Hex\n ], 'f')\n # shift the points so we can show both.\n points[:,1] += 2.0\n # The cells\n cells = array([4, 0, 1, 2, 3, # tetra\n 8, 4, 5, 6, 7, 8, 9, 10, 11 # hex\n ])\n # The offsets for the cells, i.e. 
the indices where the cells\n # start.\n offset = array([0, 5])\n tetra_type = tvtk.Tetra().cell_type # VTK_TETRA == 10\n hex_type = tvtk.Hexahedron().cell_type # VTK_HEXAHEDRON == 12\n cell_types = array([tetra_type, hex_type])\n # Create the array of cells unambiguously.\n cell_array = tvtk.CellArray()\n cell_array.set_cells(2, cells)\n # Now create the UG.\n ug = tvtk.UnstructuredGrid(points=points)\n # Now just set the cell types and reuse the ug locations and cells.\n ug.set_cells(cell_types, offset, cell_array)\n return ug", "def triangular_prism():\n return nx.read_gml(abs_path('gml/triangular_prism.gml'))", "def _create_wn(self, append=None):\n # Convert the WaterNetworkGIS to a dictionary\n wn_dict = {}\n wn_dict['nodes'] = []\n wn_dict['links'] = []\n\n for element in [self.junctions, self.tanks, self.reservoirs]:\n if element.shape[0] > 0:\n assert (element['geometry'].geom_type).isin(['Point']).all()\n df = element.reset_index()\n df.rename(columns={'index':'name', 'geometry':'coordinates'}, inplace=True)\n df['coordinates'] = [[x,y] for x,y in zip(df['coordinates'].x, \n df['coordinates'].y)]\n wn_dict['nodes'].extend(df.to_dict('records'))\n\n for element in [self.pipes, self.pumps, self.valves]:\n if element.shape[0] > 0:\n assert 'start_node_name' in element.columns\n assert 'end_node_name' in element.columns\n df = element.reset_index()\n df.rename(columns={'index':'name'}, inplace=True)\n # TODO: create vertices from LineString geometry\n df.drop(columns=['geometry'], inplace=True)\n wn_dict['links'].extend(df.to_dict('records'))\n \n # Create WaterNetworkModel from dictionary\n from wntr.network import from_dict\n wn = from_dict(wn_dict, append)\n \n return wn", "def PolyDataToUnstructuredGrid(poly):\n \n ugrid = vtk.vtkUnstructuredGrid()\n \n # Add the points\n ugrid.SetPoints(poly.GetPoints())\n # Add the cells\n for i in range(poly.GetNumberOfCells()):\n cellType = poly.GetCellType(i)\n cell = poly.GetCell(i)\n ugrid.InsertNextCell(cellType, cell.GetPointIds())\n # Add the point data\n for i in range(poly.GetPointData().GetNumberOfArrays()):\n ugrid.GetPointData().AddArray(poly.GetPointData().GetArray(i))\n # Add the cell data\n for i in range(poly.GetCellData().GetNumberOfArrays()):\n ugrid.GetCellData().AddArray(poly.GetCellData().GetArray(i))\n \n return ugrid", "def build_obj(self, universe_number, parsed_json):\r\n\r\n filename = 'chaperone_visualization' + str(universe_number) + '.obj'\r\n with open(filename, 'w+') as data_file:\r\n wall_counter = 0\r\n for room in parsed_json['universes']:\r\n # Parse each wall\r\n for wall in room['collision_bounds']:\r\n\r\n # We can assume there will be four corners to a wall\r\n for i in range(0, 4):\r\n\r\n # Prefix to specify a vertex\r\n data_file.write('v ')\r\n\r\n # Points are represented as x (left/right), y (verticle), z (front/back)\r\n coords = []\r\n for j in range(0, 3):\r\n data_file.write(str(wall[i][j]) + ' ')\r\n coords.append(wall[i][j])\r\n data_file.write('\\n') # Add space to group walls\r\n self.visualizer.add_vert(coords[0], coords[1], coords[2])\r\n wall_counter += 1\r\n\r\n data_file.write('\\n') # Space to separate vertices from faces\r\n for face in range(0, wall_counter):\r\n # Prefix to represent the line specifies a face\r\n data_file.write('f ')\r\n\r\n self.visualizer.add_edge(4 * face, 4 * face + 1)\r\n self.visualizer.add_edge(4 * face + 1, 4 * face + 2)\r\n self.visualizer.add_edge(4 * face + 2, 4 * face + 3)\r\n self.visualizer.add_edge(4 * face + 3, 4 * face)\r\n\r\n # obj file format 
refers to the first vertex as 1\r\n # We can assume that all faces can be represented as quads\r\n for i in range(1, 5):\r\n data_file.write(str(4 * face + i) + ' ')\r\n\r\n data_file.write('\\n')", "def create_file_ut2(trajectory_nodes: list) -> None:\n\n # Create new empty .ut2 file based on template\n rastr.Save('traj.ut2', 'rastr_file_patterns/траектория утяжеления.ut2')\n # Open the created file\n rastr.Load(1, 'traj.ut2', 'rastr_file_patterns/траектория утяжеления.ut2')\n\n # Redefining objects RastrWin3\n trajectory = rastr.Tables('ut_node')\n\n # Just in case clear rows in .ut2\n trajectory.DelRows()\n\n # Fill a .ut2 list of nodes forms trajectory\n # To avoid duplicates of nodes\n # create empty dictionary this is intended for nodes\n # that can be generator and load at the same\n node_data = {} # create empty dictionary\n i = 0\n for node in trajectory_nodes:\n node_type = node['variable'] # Pg - generator / Pn - load\n node_number = node['node']\n power_change = float(node['value'])\n power_tg = node['tg'] # Load's power factor\n\n # Check whether the dictionary contains a node\n if node_number not in node_data:\n # Create a pair node number - index\n node_data[node_number] = i\n i += 1\n # Fill row in .ut2\n trajectory.AddRow()\n trajectory.Cols('ny').SetZ(node_data[node_number],\n node_number)\n trajectory.Cols(node_type).SetZ(node_data[node_number],\n power_change)\n else:\n # Find existing pair and add to existing row in .ut2\n trajectory.Cols(node_type).SetZ(node_data[node_number],\n power_change)\n\n # Try add load's power factor\n if trajectory.Cols('tg').Z(node_data[node_number]) == 0:\n trajectory.Cols('tg').SetZ(node_data[node_number], power_tg)\n\n # Resave .ut2 file\n rastr.Save('traj.ut2', 'rastr_file_patterns/траектория утяжеления.ut2')", "def constructVine(self):\n # 0th tree build\n tree0 = Ctree(self.data, lvl=0, trial_copula=self.trial_copula_dict)\n tree0.seqCopulaFit()\n self.vine.append(tree0)\n # build all other trees\n self.buildDeepTrees()", "def create_tree(raw_tree, Samples, index):\n\t#initialize index of sample\n\tcount = 0\n\tif count == index: count += 1 #index to be skipped\n\t#initialize final tree\n\tfinal_tree = Tree()\n\t#add each sample to final tree in proper format\n\tfor origin in raw_tree:\n\t\t#add node\n\t\tfinal_tree.nodes.append(Node(origin, Samples[count]))\n\t\t#add to index\n\t\tcount += 1\n\t\tif count == index: count += 1 #index to be skipped\n\t#don't append tree if has loops\n\tfinal_tree.to_dict()\n\tif final_tree.loops(): return None\n\t#if pairs of samples from same time point exist, change the format to include and internode\n\tfinal_tree = get_internodes(final_tree)\n if final_tree.double_progenitor(): return None\n\t#sort nodes\n\tfinal_tree.sort_nodes()\n\t#return\n\treturn final_tree", "def create_testStim(PE,NPE):\n\tTARGET='X'\n\t# Create stimuli dict\n\ttest=dict(test=[[[PE,TARGET],[NPE,TARGET]]*20], filler=['D','M','T','V']*59,noncued=[[['D',TARGET],['M',TARGET],['T',TARGET],['V',TARGET]]*5])\n\n\t# Create list from dictionary test items\n\ttest_list=[]\n\tfor levels in test.get('test'): \n\t\ttest_list+=levels\n\ttest_list+=test.get('filler')\n\tfor levels in test.get('noncued'): \n\t\ttest_list+=levels\n\n\t# Shuffle List\n\tsudoShuffle(test_list)\n\n\t# Flatten nested List\n\tflattened = [val for sublist in test_list for val in sublist]\n\n\t# Create list holding conditions etc\n\tconds=[]\n\tfor i in range(0,len(flattened)):\n\t\tif flattened[i] == 'X' and flattened[i-1] in 
test['filler']:\n\t\t\tconds.append('NONCUED')\n\t\telif flattened[i] == PE:\n\t\t\tconds.append('PE')\n\t\telif flattened[i] == NPE:\n\t\t\tconds.append('NPE')\n\t\telif flattened[i] == TARGET:\n\t\t\tconds.append('TARGET')\n\t\telse:\n\t\t\tconds.append('FILLER')\n\t\n\tfor i in range(0,len(conds)):\n\t\tif conds[i]=='NONCUED':\n\t\t\tconds[i-1]=conds[i]\n\t\t\tconds[i]='TARGET'\n\n\t# Combine stimuli and stimuli type into dataframe\n\tconditions=pd.DataFrame({'stimuli': flattened, 'conditions': conds})\n\treturn conditions", "def VtuNeList(vtu):\n \n nodeCount = vtu.ugrid.GetNumberOfPoints()\n \n neList = []\n for i in range(nodeCount):\n pointCells = vtu.GetPointCells(i)\n neList.append(pointCells)\n \n return neList", "def reflector_universes():\n # Create dictionary to store universes\n univs = {}\n\n # Reflector at northwest corner (fuel assemblies to the right and below)\n width = 276\n p1 = 59\n p2 = 126\n p3 = 196\n p4 = 264\n\n p5 = 105\n\n p6 = 122\n p7 = 164\n\n p8 = 138\n p9 = 222\n\n p10 = 247\n\n # There are 8 large water holes and all others appear to have the same, smaller\n # diameter\n d_small = 13\n d_large = 30\n\n # All pixel widths are scaled according to the actual width of an assembly\n # divided by the width of an assembly in pixels\n lattice_pitch = surfaces.lattice_pitch\n scale = lattice_pitch/width\n\n # Physical positions\n x1 = -lattice_pitch/2 + scale*(width - p4)\n x2 = -lattice_pitch/2 + scale*(width - p3)\n x3 = -lattice_pitch/2 + scale*(width - p2)\n x4 = -lattice_pitch/2 + scale*(width - p1)\n y1 = -lattice_pitch/2 + scale*p1\n y2 = -lattice_pitch/2 + scale*p2\n y3 = -lattice_pitch/2 + scale*p3\n y4 = -lattice_pitch/2 + scale*p4\n\n x5 = -lattice_pitch/2 + scale*(width - p5)\n y5 = -lattice_pitch/2 + scale*p5\n x6 = -lattice_pitch/2 + scale*(width - p7)\n y6 = -lattice_pitch/2 + scale*p6\n x7 = -lattice_pitch/2 + scale*(width - p6)\n y7 = -lattice_pitch/2 + scale*p7\n x8 = -lattice_pitch/2 + scale*(width - p9)\n y8 = -lattice_pitch/2 + scale*p8\n x9 = -lattice_pitch/2 + scale*(width - p8)\n y9 = -lattice_pitch/2 + scale*p9\n\n y10 = -lattice_pitch/2 + scale*p10\n\n # Radius of small/large water holes\n r1 = scale*d_small/2\n r2 = scale*d_large/2\n\n params = [\n (x1, y1, r1), (x2, y1, r1), (x3, y1, r1), (x4, y1, r2),\n (x4, y2, r1), (x4, y3, r1), (x4, y4, r1), (x5, y5, r1),\n (x6, y6, r1), (x7, y7, r1), (x8, y8, r1), (x9, y9, r1),\n (x1, y10, r1)\n ]\n univs['NW'] = make_reflector('NW', params)\n\n # Reflector at (1, 1)\n\n params = [\n (x4, y1, r1),\n (lattice_pitch/2 - scale*103, -lattice_pitch/2 + scale*156, r1),\n (lattice_pitch/2 - scale*158, -lattice_pitch/2 + scale*103, r1)\n ]\n univs['1,1'] = make_reflector('1,1', params)\n\n # Left reflector (4,0)\n\n left1 = 58\n left2 = 118\n left3 = 173\n up3 = 76\n\n x1 = -lattice_pitch/2 + scale*(width - left1)\n x2 = -lattice_pitch/2 + scale*(width - left2)\n d_y = scale*67\n x3 = -lattice_pitch/2 + scale*(width - left3)\n y3 = scale*up3\n\n params = [\n (x1, 0, r1), (x1, d_y, r1), (x1, 2*d_y, r1), (x1, -d_y, r1), (x1, -2*d_y, r1),\n (x2, d_y/2, r1), (x2, 3/2*d_y, r1), (x2, -d_y/2, r1), (x2, -3/2*d_y, r1),\n (x3, y3, r1), (x3, -y3, r1)\n ]\n univs['4,0'] = make_reflector('4,0', params)\n\n # Reflector at (3,0)\n\n params = []\n for i in range(2, 7):\n params.append((x1, i*d_y - lattice_pitch, r1))\n for i in (5, 7, 11):\n params.append((x2, i*d_y/2 - lattice_pitch, r1))\n\n left3 = 140\n left4 = 183\n up3 = 159\n up4 = 47\n\n x3 = -lattice_pitch/2 + scale*(width - left3)\n y3 = -lattice_pitch/2 + 
scale*up3\n x4 = -lattice_pitch/2 + scale*(width - left4)\n y4 = -lattice_pitch/2 + scale*up4\n params += [(x3, y3, r1), (x4, y4, r1)]\n\n univs['3,0'] = make_reflector('3,0', params)\n\n # Reflector at (5,0)\n params = [(x, -y, r) for x, y, r in params]\n univs['5,0'] = make_reflector('5,0', params)\n\n # Reflector at (2, 0)\n\n params = [(-lattice_pitch/2 + scale*(width - 78),\n -lattice_pitch/2 + scale*98, r1)]\n univs['2,0'] = make_reflector('2,0', params)\n\n ################################################################################\n # Beyond this point, all universes are just copies of the ones previously\n # created with a rotation applied\n\n # First define helper function to create new universe by rotating an\n # existing one\n def rotate_universe(univ, rotation, name):\n cell = openmc.Cell(name='reflector {}'.format(name), fill=univ)\n cell.rotation = rotation\n return openmc.Universe(name=name, cells=[cell])\n\n univs['NE'] = rotate_universe(univs['NW'], (0, 0, -90), 'NE')\n univs['SW'] = rotate_universe(univs['NW'], (0, 0, 90), 'SW')\n univs['SE'] = rotate_universe(univs['NW'], (0, 0, 180), 'SE')\n univs['0,2'] = rotate_universe(univs['2,0'], (0, 180, -90), '0,2')\n univs['0,3'] = rotate_universe(univs['5,0'], (0, 0, -90), '0,3')\n univs['0,4'] = rotate_universe(univs['4,0'], (0, 0, -90), '0,4')\n univs['0,5'] = rotate_universe(univs['3,0'], (0, 0, -90), '0,5')\n univs['0,6'] = rotate_universe(univs['2,0'], (0, 0, -90), '0,6')\n univs['1,7'] = rotate_universe(univs['1,1'], (0, 0, -90), '1,7')\n univs['2,8'] = rotate_universe(univs['2,0'], (0, 180, 0), '2,8')\n univs['3,8'] = rotate_universe(univs['3,0'], (0, 180, 0), '3,8')\n univs['4,8'] = rotate_universe(univs['4,0'], (0, 180, 0), '4,8')\n univs['5,8'] = rotate_universe(univs['3,0'], (0, 0, 180), '5,8')\n univs['6,0'] = rotate_universe(univs['2,0'], (180, 0, 0), '6,0')\n univs['6,8'] = rotate_universe(univs['2,0'], (0, 0, 180), '6,8')\n univs['7,1'] = rotate_universe(univs['1,1'], (180, 0, 0), '7,1')\n univs['7,7'] = rotate_universe(univs['1,1'], (0, 0, 180), '7,7')\n univs['8,2'] = rotate_universe(univs['2,0'], (0, 0, 90), '8,2')\n univs['8,3'] = rotate_universe(univs['3,0'], (0, 0, 90), '8,3')\n univs['8,4'] = rotate_universe(univs['4,0'], (0, 0, 90), '8,4')\n univs['8,5'] = rotate_universe(univs['5,0'], (0, 0, 90), '8,5')\n univs['8,6'] = rotate_universe(univs['2,0'], (0, 0, 180), '8,6')\n\n # Solid stainless steel universe\n all_ss = openmc.Cell(name='heavy reflector', fill=mats['SS'])\n univs['solid'] = openmc.Universe(name='solid', cells=[all_ss])\n\n return univs", "def neper_tessellation(fname, number_of_cells, rve_size=1):\r\n command = \"neper -T \\\r\n -n {0:d} \\\r\n -domain 'cube({1:d},{1:d},{1:d})' \\\r\n -periodicity x,y,z \\\r\n -morpho voronoi \\\r\n -morphooptiini 'coo:file(centers.txt),weight:file(rads.txt)' \\\r\n -o {2}Tessellation -format tess,geo \\\r\n -statcell vol -statedge length -statface area \\\r\n -statver x\".format(number_of_cells, rve_size, fname)\r\n sp.Popen(sx.split(command)).wait()", "def nuDeuteriumxsection_NC_Tbl(Enu,neu,return_interpolator = False):\n if neu == 0:\n print \"Missing nu-Deuterium cross section.\"\n elif neu == 1: \n # NOTE : here the cross section was given in units of 10^{-42}\n E1 = np.array(np.append(np.append(np.append(np.append(np.arange(2.0,12.2,0.2),np.arange(12.5,25.5,0.5)),np.arange(26.0,56.0,1.0)),np.arange(60.0,105.0,5.0)),np.arange(110.0,180.0,10.0)))\n sigNC1 = 
np.array([1.e-30,1.e-30,4.362e-5,4.253e-4,1.451e-3,3.334e-3,6.236e-3,1.028e-2,1.557e-2,2.219e-2,3.021e-2,3.967e-2,5.064e-2,6.314e-2,7.722e-2,9.290e-2,1.102e-1,1.292e-1,1.498e-1,1.721e-1,1.961e-1,2.218e-1,2.491e-1,2.782e-1,3.102e-1,3.430e-1,3.776e-1,4.140e-1,4.522e-1,4.921e-1,5.339e-1,5.775e-1,6.228e-1,6.700e-1,7.189e-1,7.697e-1,8.223e-1,8.767e-1,9.328e-1,9.908e-1,1.051,1.112,1.176,1.241,1.308,1.377,1.447,1.520,1.594,1.670,1.748,1.950,2.164,2.389,2.625,2.872,3.130,3.399,3.679,3.969,4.271,4.584,4.907,5.241,5.585,5.940,6.306,6.682,7.069,7.466,7.873,8.291,8.719,9.157,9.606,1.006e1,1.053e1,1.150e1,1.251e1,1.355e1,1.464e1,1.576e1,1.692e1,1.812e1,1.936e1,2.063e1,2.194e1,2.329e1,2.467e1,2.609e1,2.754e1,2.902e1,3.055e1,3.210e1,3.368e1,3.530e1,3.695e1,3.864e1,4.035e1,4.210e1,4.387e1,4.568e1,4.751e1,4.937e1,5.126e1,5.318e1,5.513e1,6.525e1,7.599e1,8.728e1,9.906e1,1.113e2,1.239e2,1.368e2,1.500e2,1.634e2,1.908e2,2.186e2,2.465e2,2.744e2,3.020e2,3.292e2,3.559e2])\n inter=interpolate.interp1d(E1,sigNC1*1.0e-42*pc.cm**2)\n if return_interpolador :\n return inter\n else :\n return inter(Enu)\n else : \n print \"Invalid cross section neutrino type.\"", "def defineTemplate(parts, genes):\n prom = []\n ori = []\n for i in parts.index:\n ptype = parts.loc[i,'Type']\n name = parts.loc[i,'Name']\n if ptype == 'promoter':\n prom.append(name)\n elif ptype == 'origin':\n ori.append(name)\n for i in range(0,len(prom)):\n prom.append(None)\n tree = []\n gdict = {}\n for i in genes.index:\n name = genes.loc[i,'Name']\n step = \"gene%00d\" % (int(genes.loc[i,'Step']),)\n if step not in tree:\n tree.append(step) \n if step not in gdict:\n gdict[step] = []\n gdict[step].append(name)\n doe = doeTemplate(tree, origins=ori, promoters=prom, genes=gdict, positional=False)\n return doe, parts, genes", "def rhombicuboctahedron():\n return nx.read_gml(abs_path('gml/rhombicuboctahedron.gml'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a microstructure from a DCT reconstruction. DCT reconstructions are stored in several files: the indexed grain information is stored in a Matlab file in the '4_grains/phase_01' folder, while the reconstructed volume (labeled image) is stored in the '5_reconstruction' folder as an HDF5 file, possibly alongside a mask file coming from the absorption reconstruction.
def from_dct(data_dir='.', grain_file='index.mat', vol_file='phase_01_vol.mat', mask_file='volume_mask.mat', use_dct_path=True, verbose=True):
    if data_dir == '.':
        data_dir = os.getcwd()
    if data_dir.endswith(os.sep):
        data_dir = data_dir[:-1]
    scan = data_dir.split(os.sep)[-1]
    print('creating microstructure for DCT scan %s' % scan)
    micro = Microstructure(name=scan)
    micro.data_dir = data_dir
    if use_dct_path:
        index_path = os.path.join(data_dir, '4_grains', 'phase_01', grain_file)
    else:
        index_path = os.path.join(data_dir, grain_file)
    print(index_path)
    if not os.path.exists(index_path):
        raise ValueError('%s not found, please specify a valid path to the grain file.' % index_path)
        return None
    from scipy.io import loadmat
    index = loadmat(index_path)
    micro.voxel_size = index['cryst'][0][0][25][0][0]
    # grab the crystal lattice
    lattice_params = index['cryst'][0][0][3][0]
    sym = Symmetry.from_string(index['cryst'][0][0][7][0])
    print('creating crystal lattice {} ({}) with parameters {}'.format(index['cryst'][0][0][0][0], sym, lattice_params))
    lattice_params[:3] /= 10  # angstrom to nm
    lattice = Lattice.from_parameters(*lattice_params, symmetry=sym)
    micro.set_lattice(lattice)
    # add all grains to the microstructure
    for i in range(len(index['grain'][0])):
        gid = index['grain'][0][i][0][0][0][0][0]
        rod = index['grain'][0][i][0][0][3][0]
        g = Grain(gid, Orientation.from_rodrigues(rod))
        g.center = index['grain'][0][i][0][0][15][0]
        micro.grains.append(g)
    # load the grain map if available
    if use_dct_path:
        grain_map_path = os.path.join(data_dir, '5_reconstruction', vol_file)
    else:
        grain_map_path = os.path.join(data_dir, vol_file)
    if os.path.exists(grain_map_path):
        with h5py.File(grain_map_path, 'r') as f:
            # because how matlab writes the data, we need to swap X and Z axes in the DCT volume
            micro.grain_map = f['vol'][()].transpose(2, 1, 0)
        if verbose:
            print('loaded grain ids volume with shape: {}'.format(micro.grain_map.shape))
    # load the mask if available
    if use_dct_path:
        mask_path = os.path.join(data_dir, '5_reconstruction', mask_file)
    else:
        mask_path = os.path.join(data_dir, mask_file)
    if os.path.exists(mask_path):
        try:
            with h5py.File(mask_path, 'r') as f:
                micro.mask = f['vol'][()].transpose(2, 1, 0).astype(np.uint8)
        except:
            # fallback on matlab format
            micro.mask = loadmat(mask_path)['vol']
        if verbose:
            print('loaded mask volume with shape: {}'.format(micro.mask.shape))
    return micro
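As a hedged complement, a short sketch of how such a DCT scan folder might be loaded and inspected; the data directory is hypothetical and the attribute names simply follow the function body above:

from pymicro.crystal.microstructure import Microstructure  # assumed module path

# hypothetical scan folder containing 4_grains/phase_01/index.mat and a 5_reconstruction/ subfolder
micro = Microstructure.from_dct(data_dir='/data/dct/scan_0001', verbose=True)
print('indexed %d grains' % len(micro.grains))
if getattr(micro, 'grain_map', None) is not None:
    print('grain map shape:', micro.grain_map.shape, 'voxel size:', micro.voxel_size)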
[ "def from_neper(neper_file_path):\n neper_file = neper_file_path.split(os.sep)[-1]\n print('creating microstructure from Neper tesselation %s' % neper_file)\n name, ext = os.path.splitext(neper_file)\n print(name, ext)\n assert ext == '.tesr' # assuming raster tesselation\n micro = Microstructure(name=name)\n with open(neper_file_path, 'r', encoding='latin-1') as f:\n line = f.readline() # ***tesr\n # look for **general\n while True:\n line = f.readline().strip() # get rid of unnecessary spaces\n if line.startswith('**general'):\n break\n dim = f.readline().strip()\n print(dim)\n dims = np.array(f.readline().split()).astype(int).tolist()\n print(dims)\n voxel_size = np.array(f.readline().split()).astype(float).tolist()\n print(voxel_size)\n # look for **cell\n while True:\n line = f.readline().strip()\n if line.startswith('**cell'):\n break\n n = int(f.readline().strip())\n print('microstructure contains %d grains' % n)\n f.readline() # *id\n grain_ids = []\n # look for *ori\n while True:\n line = f.readline().strip()\n if line.startswith('*ori'):\n break\n else:\n grain_ids.extend(np.array(line.split()).astype(int).tolist())\n print('grain ids are:', grain_ids)\n oridescriptor = f.readline().strip() # must be euler-bunge:passive\n if oridescriptor != 'euler-bunge:passive':\n print('Wrong orientation descriptor: %s, must be euler-bunge:passive' % oridescriptor)\n for i in range(n):\n euler_angles = np.array(f.readline().split()).astype(float).tolist()\n print('adding grain %d' % grain_ids[i])\n micro.grains.append(Grain(grain_ids[i], Orientation.from_euler(euler_angles)))\n # look for **data\n while True:\n line = f.readline().strip()\n if line.startswith('**data'):\n break\n print(f.tell())\n print('reading data from byte %d' % f.tell())\n data = np.fromfile(f, dtype=np.uint16)[:-4] # leave out the last 4 values\n print(data.shape)\n assert np.prod(dims) == data.shape[0]\n micro.set_grain_map(data.reshape(dims[::-1]).transpose(2, 1, 0), voxel_size[0]) # swap X/Z axes\n micro.recompute_grain_centers()\n print('done')\n return micro", "def from_h5(file_path):\n with h5py.File(file_path, 'r') as f:\n micro = Microstructure(name=f.attrs['microstructure_name'])\n if 'symmetry' in f['EnsembleData/CrystalStructure'].attrs:\n sym = f['EnsembleData/CrystalStructure'].attrs['symmetry']\n parameters = f['EnsembleData/CrystalStructure/LatticeParameters'][()]\n micro.set_lattice(Lattice.from_symmetry(Symmetry.from_string(sym), parameters))\n if 'data_dir' in f.attrs:\n micro.data_dir = f.attrs['data_dir']\n # load feature data\n if 'R_vectors' in f['FeatureData']:\n print('some grains')\n avg_rods = f['FeatureData/R_vectors'][()]\n print(avg_rods.shape)\n if 'grain_ids' in f['FeatureData']:\n grain_ids = f['FeatureData/grain_ids'][()]\n else:\n grain_ids = range(1, 1 + avg_rods.shape[0])\n if 'centers' in f['FeatureData']:\n centers = f['FeatureData/centers'][()]\n else:\n centers = np.zeros_like(avg_rods)\n for i in range(avg_rods.shape[0]):\n g = Grain(grain_ids[i], Orientation.from_rodrigues(avg_rods[i, :]))\n g.center = centers[i]\n micro.grains.append(g)\n # load cell data\n if 'grain_ids' in f['CellData']:\n micro.grain_map = f['CellData/grain_ids'][()]\n if 'voxel_size' in f['CellData/grain_ids'].attrs:\n micro.voxel_size = f['CellData/grain_ids'].attrs['voxel_size']\n if 'mask' in f['CellData']:\n micro.mask = f['CellData/mask'][()]\n if 'voxel_size' in f['CellData/mask'].attrs:\n micro.voxel_size = f['CellData/mask'].attrs['voxel_size']\n return micro", "def from_grain_file(grain_file_path, 
col_id=0, col_phi1=1, col_phi=2, col_phi2=3, col_x=4, col_y=5, col_z=None, col_volume=None):\n # get the file name without extension\n name = os.path.splitext(os.path.basename(grain_file_path))[0]\n print('creating microstructure %s' % name)\n micro = Microstructure(name=name)\n\n # read grain infos from the grain file\n grains_EBSD = np.genfromtxt(grain_file_path)\n for i in range(len(grains_EBSD)):\n o = Orientation.from_euler([grains_EBSD[i, col_phi1], grains_EBSD[i, col_phi], grains_EBSD[i, col_phi2]])\n g = Grain(int(grains_EBSD[i, col_id]), o)\n z = grains_EBSD[i, col_z] if col_z else 0.\n g.position = np.array([grains_EBSD[i, col_x], grains_EBSD[i, col_y], z])\n if col_volume:\n g.volume = grains_EBSD[i, col_volume]\n micro.grains.append(g)\n return micro", "def from_dct(label=1, data_dir='.'):\n grain_path = os.path.join(data_dir, '4_grains', 'phase_01', 'grain_%04d.mat' % label)\n grain_info = h5py.File(grain_path)\n g = Grain(label, Orientation.from_rodrigues(grain_info['R_vector'].value))\n g.center = grain_info['center'].value\n # add spatial representation of the grain if reconstruction is available\n grain_map_path = os.path.join(data_dir, '5_reconstruction', 'phase_01_vol.mat')\n if os.path.exists(grain_map_path):\n with h5py.File(grain_map_path, 'r') as f:\n # because how matlab writes the data, we need to swap X and Z axes in the DCT volume\n vol = f['vol'].value.transpose(2, 1, 0)\n from scipy import ndimage\n grain_data = vol[ndimage.find_objects(vol == label)[0]]\n g.volume = ndimage.measurements.sum(vol == label)\n # create the vtk representation of the grain\n g.add_vtk_mesh(grain_data, contour=False)\n return g", "def get_tomo_MFR(data_directory, subsampling = False):\r\n\r\n\t# output *.hdf file name, will be stored in the same directory as the *.mat files\r\n\toutput_fname = data_directory + 'tomo_COMPASS.hdf'\r\n\tnew_file = h5py.File(output_fname, 'w')\r\n\r\n\tn_reconstr = 0\r\n\r\n\tfor filename in glob.glob(data_directory + '*.mat'):\r\n\r\n\t\tpulse = filename[filename.find('shot')+len('shot'):filename.find('_reg')]\r\n\r\n\t\tprint filename\r\n\r\n\t\tf = scipy.io.loadmat(filename)\r\n\r\n\t\t#------------------------------------------------------------------ \r\n\t\t# COMPASS Matlab File dictionary keys meaning\r\n\t\t#\r\n\t\t# G_final\t- reconstructions (#time_steps, #pixels)\r\n\t\t# tvec\t\t- time vector \r\n\t\t# CHI2 \t\t- chi^2 values (~1)\r\n\t\t# Y \t\t- input signal from detectors (#time_steps, #detectors)\r\n\t\t# Yrfit \t- virtual signals from detectors after reconstruction\r\n\t\t# dets \t\t- list of detectors used\r\n\t\t# dY \t\t- expected error of detectors\r\n\t\t#------------------------------------------------------------------\r\n\r\n\t\tprint f.keys()\r\n\r\n\t\tt = f['tvec'][:][0]\r\n\t\ttomo = np.swapaxes(np.swapaxes(f['G_final'][:],0,2),1,2)\r\n\t\tSXR = f['Y'][:]\r\n\t\teSXR = f['dY'][:]\r\n\t\tCHI2 = f['CHI2'][:,0]\r\n\t\tSXRfit = f['Yrfit'][:]\r\n\r\n\t\tprint pulse, t.shape, t.dtype, tomo.shape, tomo.dtype, SXR.shape, SXR.dtype, CHI2.shape,\\\r\n\t\teSXR.shape, eSXR.dtype\r\n\r\n\t\tindex = abs(CHI2-1) < 0.05\r\n\r\n\t\ttomo = tomo[index,:,:]\r\n\t\tSXR = SXR[index,:]\r\n\t\teSXR = eSXR[index,:]\r\n\t\tSXRfit = SXRfit[index,:]\r\n\t\tt = t[index]\r\n\t\tCHI2 = CHI2[index]\r\n\r\n\t\tif subsampling :\r\n\t\t\t\r\n\t\t\tassert isinstance(subsampling, int)\r\n\r\n\t\t\tindex = [i%subsampling==0 for i in range(len(t))]\r\n\t\t\ttomo = tomo[index,:,:]\r\n\t\t\tSXR = SXR[index,:]\r\n\t\t\teSXR = eSXR[index,:]\r\n\t\t\tSXRfit = 
SXRfit[index,:]\r\n\t\t\tt = t[index]\r\n\t\t\tCHI2 = CHI2[index]\r\n\r\n\t\tn_reconstr += len(t)\r\n\r\n\t\tg = new_file.create_group(pulse)\r\n\t\tg.create_dataset('t', data=t)\r\n\t\tg.create_dataset('SXR', data=SXR)\r\n\t\tg.create_dataset('eSXR', data=eSXR)\r\n\t\tg.create_dataset('tomo', data=tomo)\r\n\t\tg.create_dataset('SXRfit', data=SXRfit)\r\n\t\tg.create_dataset('CHI2', data=CHI2)\r\n\r\n\t\tprint pulse, t.shape, t.dtype, tomo.shape, tomo.dtype, SXR.shape, SXR.dtype, eSXR.shape, eSXR.dtype\r\n\r\n\tprint '# reconstructions :', n_reconstr\r\n\r\n\t# save detectors and last pulse used. Later it will be needed to know the geometry\r\n\t# so the chi2 value can be correctly calculated \r\n\t# -1 due to conversion from matlab to python indeces\r\n\tSXRA = np.squeeze(f['dets'][0][0]) - 1\r\n\tSXRB = np.squeeze(f['dets'][0][1]) - 1\r\n\tSXRF = np.squeeze(f['dets'][0][2]) - 1\r\n\tprint 'SXRA :', SXRA\r\n\tprint 'SXRB :', SXRB\r\n\tprint 'SXRF :', SXRF\r\n\r\n\tnp.savez(data_directory + 'tomo_GEOM.npz', SXRA = SXRA, SXRB = SXRB, SXRF = SXRF, last_pulse = pulse)", "def to_h5(self):\n import time\n from pymicro import __version__ as pymicro_version\n\n print('opening file %s.h5 for writing' % self.name)\n f = h5py.File('%s.h5' % self.name, 'w')\n f.attrs['Pymicro_Version'] = np.string_(pymicro_version)\n f.attrs['HDF5_Version'] = h5py.version.hdf5_version\n f.attrs['h5py_version'] = h5py.version.version\n f.attrs['file_time'] = time.time()\n f.attrs['microstructure_name'] = self.name\n if hasattr(self, 'data_dir'):\n f.attrs['data_dir'] = self.data_dir\n # ensemble data\n ed = f.create_group('EnsembleData')\n cs = ed.create_group('CrystalStructure')\n sym = self.get_lattice().get_symmetry()\n cs.attrs['symmetry'] = sym.to_string()\n lp = cs.create_dataset('LatticeParameters',\n data=np.array(self.get_lattice().get_lattice_parameters(), dtype=np.float32))\n # feature data\n fd = f.create_group('FeatureData')\n grain_ids = fd.create_dataset('grain_ids',\n data=np.array([g.id for g in self.grains], dtype=np.int))\n avg_rods = fd.create_dataset('R_vectors',\n data=np.array([g.orientation.rod for g in self.grains], dtype=np.float32))\n centers = fd.create_dataset('centers',\n data=np.array([g.center for g in self.grains], dtype=np.float32))\n # cell data\n cd = f.create_group('CellData')\n if hasattr(self, 'grain_map') and self.grain_map is not None:\n gm = cd.create_dataset('grain_ids', data=self.grain_map, compression='gzip', compression_opts=9)\n gm.attrs['voxel_size'] = self.voxel_size\n if hasattr(self, 'mask') and self.mask is not None:\n ma = cd.create_dataset('mask', data=self.mask, compression='gzip', compression_opts=9)\n ma.attrs['voxel_size'] = self.voxel_size\n print('done writing')\n f.close()", "def make_fluximage(grism_root='COSMOS-3-G141', wavelength=1.1e4, direct_image=None, match_toler=1, verbose=True):\n \n from pyraf import iraf\n \n out_image = 'DATA/'+grism_root.replace('G141','f%03d' %(wavelength/100))+'.fits'\n \n ##### Get the path and read the catalogs\n PATH = unicorn.analysis.get_grism_path(grism_root)\n PWD=os.getcwd()\n print PATH\n os.chdir(PATH)\n \n ## read photometric, redshift, SPS catalogs\n cat, zout, fout = unicorn.analysis.read_catalogs(root=grism_root)\n ## path where other eazy outputs live\n OUTPUT_DIRECTORY = os.path.dirname(zout.filename)\n MAIN_OUTPUT_FILE = os.path.basename(zout.filename).split('.zout')[0]\n ## read grism outputs\n grismCat, SPC = unicorn.analysis.read_grism_files(root=grism_root)\n \n detect_wlen = 
np.float(grismCat.DETECT_FILTER.strip('MAG_FW'))*10\n \n #### Detection and segmentation images\n if direct_image is None:\n direct_image = glob.glob('PREP_FLT/'+grism_root.replace('G141','*')+'_drz.fits')[0]\n \n seg_file = grismCat.filename.replace('drz.cat','seg.fits')\n seg_file = threedhst.utils.find_fits_gz(seg_file)\n \n direct = pyfits.open(direct_image)\n seg = pyfits.open(seg_file)\n \n #### Loop through objects in the catalog\n cosfact = np.cos(np.median(grismCat.dec)/360*2*np.pi)\n xint = np.array([wavelength, detect_wlen])\n\n #### If a Grism SPC file exists, only use IDs defined there\n #### Otherwise use all objects in the SEx. catalog\n if SPC is not None:\n ids = SPC._ext_map\n else:\n ids = grismCat.id\n \n for j, id in enumerate(ids):\n progress = '%2d' %(np.int(j*100./len(ids))) + '%'\n print unicorn.noNewLine+out_image+': '+progress\n \n i = np.arange(grismCat.nrows)[grismCat.id == id][0]\n \n dr = np.sqrt((cat.ra-grismCat.ra[i])**2*cosfact**2+\n (cat.dec-grismCat.dec[i])**2)*3600\n mat = np.where(dr == np.min(dr))[0][0]\n \n scale = 1.\n \n if dr[mat] < match_toler:\n lambdaz, temp_sed, lci, obs_sed, fobs, efobs = \\\n eazy.getEazySED(mat, MAIN_OUTPUT_FILE=MAIN_OUTPUT_FILE, \\\n OUTPUT_DIRECTORY=OUTPUT_DIRECTORY, \\\n CACHE_FILE = 'Same')\n \n #### Smooth to grism resolution\n try:\n lambdaz, temp_sed = unicorn.analysis.convolveWithThumb(id, lambdaz, temp_sed, SPC)\n except:\n pass\n # lambdaz2, temp_sed2 = unicorn.analysis.convolveWithThumb(id, lambdaz, temp_sed, SPC, oned=False)\n \n yint = np.interp(xint, lambdaz, temp_sed)\n scale = yint[0]/yint[1] \n direct[1].data[seg[0].data == id] *= scale\n \n ### f_nu\n direct[1].data *= wavelength**2/detect_wlen**2\n \n ### Write the image, but keep just the SCI extension\n direct[1].writeto('/tmp/fluximage.fits', clobber=True)\n try:\n os.remove(out_image)\n except:\n pass\n \n os.chdir(PWD)\n iraf.imcopy('/tmp/fluximage.fits[1]',out_image)\n \n return out_image", "def __init__(self, name, size=50, sector=0, cadence=None):\n super(Source_cut_pseudo, self).__init__()\n if cadence is None:\n cadence = []\n self.name = name\n self.size = size\n self.sector = sector\n self.camera = 0\n self.ccd = 0\n self.wcs = []\n self.time = np.arange(10)\n self.flux = 20 * np.ones((100, 50, 50)) + np.random.random(size=(100, 50, 50))\n star_flux = np.random.random(100) * 1000 + 200\n star_x = np.random.random(100) * 50 - 0.5\n star_y = np.random.random(100) * 50 - 0.5\n star_x_round = np.round(star_x)\n star_y_round = np.round(star_y)\n for j in range(100):\n for i in range(100):\n self.flux[j, int(star_y_round[i]), int(star_x_round[i])] += star_flux[i]\n try:\n self.flux[j, int(star_y_round[i]), int(star_x_round[i]) + 1] += star_flux[i]\n except:\n continue\n self.flux_err = []\n self.gaia = []\n self.cadence = cadence\n self.quality = []\n self.mask = np.ones(np.shape(self.flux[0]))\n\n # t_tic = Table()\n # t_tic[f'tic'] = tic_id[in_frame]\n t = Table()\n t[f'tess_mag'] = - star_flux\n t[f'tess_flux'] = star_flux\n t[f'tess_flux_ratio'] = star_flux / np.max(star_flux)\n t[f'sector_{self.sector}_x'] = star_x\n t[f'sector_{self.sector}_y'] = star_y\n gaia_targets = t # TODO: sorting not sorting all columns\n gaia_targets.sort('tess_mag')\n self.gaia = gaia_targets", "def create_datacube(infile, outfile='', b_type='intensity', nu=0, mol='', source_name='', spec_axis=True, overwrite=False, verbose=False):\n\tfrom glob import glob\n\n\tdef del_key(d, k):\n\t\ttry:\n\t\t\tdel d[k]\n\t\texcept:\n\t\t\tpass\n\n\tfname = 
sys._getframe().f_code.co_name\n\tstart_time = time.time()\n\n\t# Loop over input files \n\tcube = list()\t\n\tfor velmap in sorted(glob(infile)):\n\t\tinput_from_file = True\n\n\t\td = fits.getdata(str(velmap))\n\t\theader = fits.getheader(str(velmap))\n\n\t\tif 'intensity' in b_type.lower():\t\n\t\t\tbtype = 'Intensity'\n\t\t\tb_unit = 'Jy/pixel '\n\t\t\tidx = 0\t\t\t\n\t\t\tcube.append(d[idx])\n\n\t\telif 'optical_depth' in b_type.lower():\n\t\t\tbtype = 'Optical depth'\n\t\t\tb_unit = ''\n\t\t\tidx = 4\t\t\t\n\t\t\tcube.append(d[idx])\n\n\t\telse:\n\t\t\traise NameError(\"Brightness type incorrect. Pick 'intensity' or 'optical_depth'\")\n\n\n\t#Write required keywords\n\t#1st axis\n\theader['CTYPE1'] = 'RA---SIN'\n\theader['CRVAL1'] = np.float(header['CRVAL1A'])\n\t# header['CRPIX1'] = np.float(header['NAXIS1']/2 + 1)\n\theader['CRPIX1'] = np.float(header['CRPIX1A'])\n\theader['CDELT1'] = -np.float(header['CDELT1A'])\n\theader['CUNIT1'] = 'deg'\n\theader['CROTA1'] = np.float(0)\n\t#2nd axis\n\theader['CTYPE2'] = 'DEC--SIN'\n\theader['CRVAL2'] = np.float(header['CRVAL2A'])\n\t# header['CRPIX2'] = np.float(header['NAXIS1']/2 + 1)\n\theader['CRPIX2'] = np.float(header['CRPIX2A'])\n\theader['CDELT2'] = np.float(header['CDELT2A'])\n\theader['CUNIT2'] = 'deg'\n\theader['CROTA2'] = np.float(0)\n\t#3rd axis\n\tif spec_axis:\n\t\theader['NAXIS'] = 3\n\t\theader['CTYPE3'] = 'VRAD'\n\t\theader['CRVAL3'] = np.float(0)\n\t\theader['CRPIX3'] = np.float(int((header['CHANNELS']/2)+1) if (header['CHANNELS']%2 != 0) else int((header['CHANNELS']/2)))\n\t\theader['CDELT3'] = np.float((2*header['MAXVEL'])/(header['CHANNELS']))\n\t\theader['CUNIT3'] = 'm/s'\n\t\theader['CROTA3'] = np.float(0)\n\n\telse:\n\t\theader['NAXIS'] = 2\n\t\tfor k in ['CTYPE3','CRPIX3','CDELT3','CUNIT3','CRVAL3','CROTA3']:\n\t\t\tdel_key(header, k)\n\n\t# Add missing keywords \tsrc: http://www.alma.inaf.it/images/ArchiveKeyworkds.pdf\n\tif source_name != '':\n\t\theader['OBJECT'] = source_name\n\tif mol != '':\n\t\theader['MOLECULE'] = mol\n\theader['BTYPE'] = btype\n\theader['BSCALE'] = 1.0\n\theader['BUNIT'] = b_unit\n\theader['BMAJ'] = np.abs(header['CDELT1'])\n\theader['BMIN'] = np.abs(header['CDELT2'])\n\theader['BPA'] = 0\n\theader['BZERO'] = 0.0\n\theader['RADESYS'] = 'ICRS'\n\theader['SPECSYS'] = 'LSRK'\n\theader['TIMESYS'] = 'UTC'\n\theader['DATE'] = f'{str(datetime.utcnow()).split()[0]}T{str(datetime.utcnow()).split()[1]}'\n\theader['FREQ'] = np.float32(nu)\n\theader['RESTFRQ'] = np.float32(nu)\n\n\t# Delete extra WCSs included in the header\n\tkeywords_to_delete = ['CTYPE1A','CRVAL1A','CRPIX1A','CDELT1A','CUNIT1A','CTYPE1B','CRVAL1B','CRPIX1B','CDELT1B','CUNIT1B','CTYPE1C','CRVAL1C','CRPIX1C','CDELT1C','CUNIT1C']\n\tkeywords_to_delete += ['CTYPE2A','CRVAL2A','CRPIX2A','CDELT2A','CUNIT2A','CTYPE2B','CRVAL2B','CRPIX2B','CDELT2B','CUNIT2B','CTYPE2C','CRVAL2C','CRPIX2C','CDELT2C','CUNIT2C']\n\tkeywords_to_delete += ['DETGRID','ID','CRPIX4','CTYPE4','CDELT4','CUNIT4','CRVAL4','NAXIS4']\n\t\n\tfor k in keywords_to_delete:\n\t\tdel_key(header, k)\n\n\n\t# Write data to fits file if required\n\twrite_fits(outfile, cube, header, overwrite, fname, verbose)\n\n\t# Print the time taken by the function\n\telapsed_time(time.time()-start_time, fname, verbose)\n\n\treturn np.array(cube)", "def load_fluctuations_3D_fluc_only(self):\n #similar to the 2D case, we first read one file to determine the total toroidal plane number in the simulation\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[0]).zfill(5)+'.h5'\n fluc_mesh = 
h5.File(flucf,'r')\n\n self.n_plane = fluc_mesh['dpot'].shape[1]\n dn = int(self.n_plane/self.n_cross_section)\n self.center_planes = np.arange(self.n_cross_section)*dn\n\n self.planes = np.unique(np.array([np.unique(self.prevplane),np.unique(self.nextplane)]))\n self.planeID = {self.planes[i]:i for i in range(len(self.planes))} #the dictionary contains the positions of each chosen plane, useful when we want to get the data on a given plane known only its plane number in xgc file.\n\n #initialize the arrays\n if(self.HaveElectron):\n self.nane = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n nane_all = np.zeros((self.n_plane,len(self.time_steps),len(self.mesh['R'])))\n if(self.load_ions):\n self.dni = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n dni_all = np.zeros((self.n_plane,len(self.time_steps),len(self.mesh['R'])))\n self.phi = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n phi_all = np.zeros((self.n_plane,len(self.time_steps),len(self.mesh['R'])))\n\n #load all the rest of the files\n for i in range(1,len(self.time_steps)):\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[i]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n for j in range(self.n_plane):\n phi_all[j,i] += np.swapaxes(fluc_mesh['dpot'][...][:,j],0,1)\n if(self.HaveElectron):\n nane_all[j,i] += np.swapaxes(fluc_mesh['eden'][...][:,j],0,1)\n if(self.load_ions):\n dni_all[j,i] += np.swapaxes(fluc_mesh['iden'][...][:,j],0,1)\n fluc_mesh.close()\n\n\n #similar to the 2D case, we take care of the equilibrium relaxation contribution. See details in the comments in 2D loading function.\n\n phi_avg_tor = np.average(phi_all,axis = 0)\n if self.HaveElectron:\n nane_avg_tor = np.average(nane_all,axis=0)\n if self.load_ions:\n dni_avg_tor = np.average(dni_all,axis=0)\n\n for j in range(self.n_cross_section):\n self.phi[j,...] = np.swapaxes(phi_all[(self.center_planes[j] + self.planes)%self.n_plane,:,:],0,1) - phi_avg_tor[:,np.newaxis,:]\n if self.HaveElectron:\n self.nane[j,...] = np.swapaxes(nane_all[(self.center_planes[j] + self.planes)%self.n_plane,:,:],0,1) - nane_avg_tor[:,np.newaxis,:]\n if self.load_ions:\n self.dni[j,...] 
= np.swapaxes(dni_all[(self.center_planes[j] + self.planes)%self.n_plane,:,:],0,1) - dni_avg_tor[:,np.newaxis,:]\n\n self.ne0[:] += np.average(phi_avg_tor,axis=0)\n if self.HaveElectron:\n self.ne0[:] += np.average(nane_avg_tor,axis=0)\n self.ni0[:] += np.average(phi_avg_tor,axis=0)\n if self.load_ions:\n self.ni0[:] += np.average(dni_avg_tor,axis=0)\n\n return 0", "def build_dataset(width=21, step=10, threshold=5, formants_number=4, wav_number=15, random_sate=None):\n data = pick_random_files(wav_number, random_state=random_sate)\n\n fs = []\n duration = []\n autocorr_pitch_mean = []\n autocorr_pitch_median = []\n cepstrum_pitch_mean = []\n cepstrum_pitch_median = []\n form = {}\n dmfcc = {}\n speaker = []\n\n for i in range(13):\n dmfcc[f'mfcc{i}'] = []\n\n for i in range(formants_number):\n form[f'f{i+1}_mean'] = []\n\n for spkr, files in data.items():\n for sfs, signal in files:\n sformants = formants(signal, width, step, sfs)\n auto_pitch = get_pitch(signal, width, step, sfs, threshold, method=autocorrelation, extend=False)\n cepstrum_pitch = get_pitch(signal, width, step, sfs, threshold, method=cepstrum, extend=False)\n smfcc = mfcc(signal, width, step, sfs)\n\n for i in range(smfcc.shape[1]):\n dmfcc[f'mfcc{i}'].append(smfcc[:, i].mean())\n\n for i in range(formants_number):\n form[f'f{i+1}_mean'].append(sformants[:, i].mean())\n\n fs.append(sfs)\n duration.append(signal.size / sfs)\n\n autocorr_pitch_mean.append(auto_pitch.mean())\n autocorr_pitch_median.append(np.median(auto_pitch))\n cepstrum_pitch_mean.append(cepstrum_pitch.mean())\n cepstrum_pitch_median.append(np.median(cepstrum_pitch))\n speaker.append(spkr)\n\n d = {'fs': fs, 'duration': duration, 'autocorr_pitch_mean': autocorr_pitch_mean,\n 'autocorr_pitch_median': autocorr_pitch_median,\n 'cepstrum_pitch_mean': cepstrum_pitch_mean, 'cepstrum_pitch_median': cepstrum_pitch_median,\n 'speaker': speaker}\n d.update(form)\n d.update(dmfcc)\n\n return pd.DataFrame(data=d)", "def to_odim(volume, filename, timestep=0):\n root = volume.root\n\n h5 = h5py.File(filename, \"w\")\n\n # root group, only Conventions for ODIM_H5\n _write_odim({\"Conventions\": \"ODIM_H5/V2_2\"}, h5)\n\n # how group\n how = {}\n how.update({\"_modification_program\": \"wradlib\"})\n\n h5_how = h5.create_group(\"how\")\n _write_odim(how, h5_how)\n\n sweepnames = root.sweep_group_name.values\n\n # what group, object, version, date, time, source, mandatory\n # p. 
10 f\n what = {}\n if len(sweepnames) > 1:\n what[\"object\"] = \"PVOL\"\n else:\n what[\"object\"] = \"SCAN\"\n what[\"version\"] = \"H5rad 2.2\"\n what[\"date\"] = str(root.time_coverage_start.values)[:10].replace(\"-\", \"\")\n what[\"time\"] = str(root.time_coverage_end.values)[11:19].replace(\":\", \"\")\n what[\"source\"] = root.attrs[\"instrument_name\"]\n\n h5_what = h5.create_group(\"what\")\n _write_odim(what, h5_what)\n\n # where group, lon, lat, height, mandatory\n where = {\n \"lon\": root.longitude.values,\n \"lat\": root.latitude.values,\n \"height\": root.altitude.values,\n }\n h5_where = h5.create_group(\"where\")\n _write_odim(where, h5_where)\n\n # datasets\n ds_list = [f\"dataset{i + 1}\" for i in range(len(sweepnames))]\n ds_idx = np.argsort(ds_list)\n for idx in ds_idx:\n if isinstance(volume, (OdimH5, CfRadial)):\n ds = volume[f\"sweep_{idx + 1}\"]\n elif isinstance(volume, XRadVolume):\n ds = volume[idx][timestep].data\n ds = ds.drop_vars(\"time\", errors=\"ignore\").rename({\"rtime\": \"time\"})\n else:\n ds = volume[idx]\n if \"time\" not in ds.dims:\n ds = ds.expand_dims(\"time\")\n ds = ds.isel(time=timestep, drop=True)\n ds = ds.drop_vars(\"time\", errors=\"ignore\").rename({\"rtime\": \"time\"})\n h5_dataset = h5.create_group(ds_list[idx])\n\n # what group p. 21 ff.\n h5_ds_what = h5_dataset.create_group(\"what\")\n ds_what = {}\n # skip NaT values\n valid_times = ~np.isnat(ds.time.values)\n t = sorted(ds.time.values[valid_times])\n start = dt.datetime.utcfromtimestamp(np.rint(t[0].astype(\"O\") / 1e9))\n end = dt.datetime.utcfromtimestamp(np.rint(t[-1].astype(\"O\") / 1e9))\n ds_what[\"product\"] = \"SCAN\"\n ds_what[\"startdate\"] = start.strftime(\"%Y%m%d\")\n ds_what[\"starttime\"] = start.strftime(\"%H%M%S\")\n ds_what[\"enddate\"] = end.strftime(\"%Y%m%d\")\n ds_what[\"endtime\"] = end.strftime(\"%H%M%S\")\n _write_odim(ds_what, h5_ds_what)\n\n # where group, p. 11 ff. mandatory\n h5_ds_where = h5_dataset.create_group(\"where\")\n rscale = ds.range.values[1] / 1.0 - ds.range.values[0]\n rstart = (ds.range.values[0] - rscale / 2.0) / 1000.0\n # todo: make this work for RHI's\n a1gate = np.argsort(ds.sortby(\"azimuth\").time.values)[0]\n try:\n fixed_angle = ds.fixed_angle\n except AttributeError:\n fixed_angle = ds.elevation.round(decimals=1).median().values\n ds_where = {\n \"elangle\": fixed_angle,\n \"nbins\": ds.range.shape[0],\n \"rstart\": rstart,\n \"rscale\": rscale,\n \"nrays\": ds.azimuth.shape[0],\n \"a1gate\": a1gate,\n }\n _write_odim(ds_where, h5_ds_where)\n\n # how group, p. 14 ff.\n h5_ds_how = h5_dataset.create_group(\"how\")\n tout = [tx.astype(\"O\") / 1e9 for tx in ds.sortby(\"azimuth\").time.values]\n tout_sorted = sorted(tout)\n\n # handle non-uniform times (eg. 
only second-resolution)\n if np.count_nonzero(np.diff(tout_sorted)) < (len(tout_sorted) - 1):\n tout = np.roll(\n np.linspace(tout_sorted[0], tout_sorted[-1], len(tout)), a1gate\n )\n tout_sorted = sorted(tout)\n\n difft = np.diff(tout_sorted) / 2.0\n difft = np.insert(difft, 0, difft[0])\n azout = ds.sortby(\"azimuth\").azimuth\n diffa = np.diff(azout) / 2.0\n diffa = np.insert(diffa, 0, diffa[0])\n elout = ds.sortby(\"azimuth\").elevation\n diffe = np.diff(elout) / 2.0\n diffe = np.insert(diffe, 0, diffe[0])\n try:\n sweep_number = ds.sweep_number + 1\n except AttributeError:\n sweep_number = timestep\n ds_how = {\n \"scan_index\": sweep_number,\n \"scan_count\": len(sweepnames),\n \"startazT\": tout - difft,\n \"stopazT\": tout + difft,\n \"startazA\": azout - diffa,\n \"stopazA\": azout + diffa,\n \"startelA\": elout - diffe,\n \"stopelA\": elout + diffe,\n }\n _write_odim(ds_how, h5_ds_how)\n\n # write moments\n _write_odim_dataspace(ds, h5_dataset)\n\n h5.close()", "def load_fluctuations_3D_all(self):\n #similar to the 2D case, we first read one file to determine the total toroidal plane number in the simulation\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[0]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n\n self.planes = np.unique(np.array([np.unique(self.prevplane),np.unique(self.nextplane)]))\n self.planeID = {self.planes[i]:i for i in range(len(self.planes))} #the dictionary contains the positions of each chosen plane, useful when we want to get the data on a given plane known only its plane number in xgc file.\n if(self.HaveElectron):\n self.nane = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n self.nane_bar = np.zeros((len(self.time_steps)))\n\n if(self.load_ions):\n self.dni = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n self.dni_bar = np.zeros((len(self.time_steps)))\n\n self.phi = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n self.phi_bar = np.zeros((len(self.time_steps)))\n for i in range(len(self.time_steps)):\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[i]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n\n if(i==0):\n #self.n_plane = fluc_mesh['dpot'].shape[1]\n dn = int(self.n_plane/self.n_cross_section)\n self.center_planes = np.arange(self.n_cross_section)*dn\n\n self.phi_bar[i] = np.mean(fluc_mesh['dpot'][...])\n if (self.HaveElectron):\n self.nane_bar[i] = np.mean(fluc_mesh['eden'][...])\n if (self.load_ions):\n self.dni_bar[i] = np.mean(fluc_mesh['iden'][...])\n\n for j in range(self.n_cross_section):\n self.phi[j,i] += np.swapaxes(fluc_mesh['dpot'][...][:,(self.center_planes[j] + self.planes)%self.n_plane],0,1)\n self.phi[j,i] -= self.phi_bar[i]\n if(self.HaveElectron):\n self.nane[j,i] += np.swapaxes(fluc_mesh['eden'][...][:,(self.center_planes[j] + self.planes)%self.n_plane],0,1)\n self.nane[j,i] -= self.nane_bar[i]\n if(self.load_ions):\n self.dni[j,i] += np.swapaxes(fluc_mesh['iden'][...][:,(self.center_planes[j] + self.planes)%self.n_plane],0,1)\n self.dni[j,i] -= self.dni_bar[i]\n fluc_mesh.close()\n\n return 0", "def make_rms_map():\n\tpath = '/nfs/slac/g/ki/ki19/deuce/AEGIS/unzip/'\n\tfile_name = path+'seg_ids.txt'\n\tall_seg_ids = np.loadtxt(file_name, delimiter=\" \",dtype='S2')\n\t#all_seg_ids=['01']\n\tfilters = ['f606w', 'f814w']\n\tfor f in filters:\n\t\tfor fl in glob.glob(path+f+'/*_rms.fits'):\n\t\t\tos.remove(fl)\n\t\tfor id in all_seg_ids:\n\t\t\tfile_name = path + f 
+'/EGS_10134_'+ id +'_acs_wfc_'+f+'_30mas_unrot_wht.fits'\n\t\t\thdu = pyfits.open(file_name)\n\t\t\tdat = hdu[0].data\n\t\t\tnew_dat = 1/(np.array(dat)**0.5)\n\t\t\tnew_header = hdu[0].header\n\t\t\thdu.close()\n\t\t\tnew_name = path + f +'/EGS_10134_'+ id +'_acs_wfc_'+f+'_30mas_unrot_rms.fits'\n\t\t\tpyfits.writeto(new_name, new_dat, new_header)", "def process_convert_dcm_to_hdf5(paths, params, copy_dcm_files = False):\n\n\t# read all dicom files\n\tF = [f for f in read_directory(paths['mri_folder']) if f[-4:] == '.dcm']\n\n\t# loop over each file, read dicom files, save data\n\tfor f in F:\n\n\t\t# verbose\n\t\tlogging.info(f'Processing file : {f}')\n\n\t\t# read dcm file\n\t\tdataset = pydicom.dcmread(f)\n\n\t\t# construct meta data\n\t\tmeta_data = {\t'SOPClassUID' : dataset.SOPClassUID,\n\t\t\t\t\t\t'SeriesInstanceUID' : dataset.SeriesInstanceUID,\n\t\t\t\t\t\t'PatientName' : dataset.PatientName,\n\t\t\t\t\t\t'PatientID' : dataset.PatientID,\n\t\t\t\t\t\t'SeriesNumber' : dataset.SeriesNumber,\n\t\t\t\t\t\t'Rows' : dataset.Rows,\n\t\t\t\t\t\t'Columns' : dataset.Columns,\n\t\t\t\t\t\t'AcquisitionDateTime' : dataset.AcquisitionDateTime,\n\t\t\t\t\t\t'ProtocolName' : dataset.ProtocolName,\n\t\t\t\t\t\t'SeriesDescription' : dataset.SeriesDescription\n\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t# convert all meta data to string\n\t\tmeta_data = {key : str(value) for key, value in meta_data.items()}\n\n\t\t# get the MRI image\t\t\t\t\n\t\timage = dataset.pixel_array\n\n\t\t# verbose\n\t\tlogging.debug(f'Image shape : {image.shape}')\n\t\tlogging.debug(f'Image datatype : {image.dtype}')\n\n\t\t# if image has 3 slices, then this is the scout image (the first image to get a quick scan of the sample), this we skip\n\t\tif image.shape[0] == 3:\n\t\t\tlogging.debug('MRI image is scout, skipping...')\n\t\t\tcontinue\n\n\n\t\t# get treatment-sample combination\n\t\ttreatment_sample = re.findall('[0-9]+-[0-9]+', meta_data['PatientName'])[0]\n\n\t\t# get state\n\t\tstate = f.split(os.sep)[-6]\n\n\t\t# change patient name into specific format that will be used throughout all analysis\n\t\t# This is: Torsk [treatment]-[sample] [Tint|fersk]\n\t\t# for example: Tork 1-1 fersk\n\t\tpatient_name = 'Torsk {} {}'.format(treatment_sample, state)\n\n\t\t# check if patient scan is valid\n\t\tif not check_validity_mri_scan(patientname = patient_name, datetime = meta_data['AcquisitionDateTime']):\n\t\t\tlogging.debug('Scan was invalid, skipping...')\n\t\t\tcontinue\n\n\t\tif copy_dcm_files:\n\t\t\t# copy .DCM file to folder with new patient name\n\t\t\tdestination_folder = os.path.join(paths['dcm_folder'], state, patient_name)\n\t\t\t# create state folder\n\t\t\tcreate_directory(destination_folder)\n\t\t\t# define source file\n\t\t\tsource_file = f\n\t\t\t# define destination file\n\t\t\tdestination_file = os.path.join(destination_folder, f'{patient_name}.dcm')\n\t\t\t# start copying\n\t\t\ttry:\n\t\t\t\tshutil.copy(source_file, destination_file)\n\t\t\texcept Exception as e:\n\t\t\t\tlogging.error(f'Failed copying .DCM file to dcm folder: {e}')\n\t\t\n\t\t# add extra meta data\n\t\tmeta_data['ClassLabel'] = create_class_label(patient_name)\n\t\tmeta_data['ProtocolTranslation'] = get_protocol_translation(dataset.ProtocolName)\n\t\tmeta_data['DrippLossPerformed'] = False \n\t\t\t\t\t\t\n\t\t# save data to HDF5\n\t\tsave_data_to_group_hdf5(group = params['group_original_mri'],\n\t\t\t\t\t\t\t\tdata = image,\n\t\t\t\t\t\t\t\tdata_name = patient_name, \n\t\t\t\t\t\t\t\thdf5_file = os.path.join(paths['hdf5_folder'], 
params['hdf5_file']),\n\t\t\t\t\t\t\t\tmeta_data = meta_data, \n\t\t\t\t\t\t\t\toverwrite = True)", "def specphot(id=69, grism_root='ibhm45030',\n MAIN_OUTPUT_FILE = 'cosmos-1.v4.6',\n OUTPUT_DIRECTORY = '/Users/gbrammer/research/drg/PHOTZ/EAZY/NEWFIRM/v4.6/OUTPUT_KATE/',\n CACHE_FILE = 'Same', Verbose=False,\n SPC = None, cat=None, grismCat = None,\n zout = None, fout = None, OUT_PATH='/tmp/', OUT_FILE_FORMAT=True,\n OUT_FILE='junk.png', GET_SPEC_ONLY=False):\n #import scipy.interpolate as interpol\n \n ### 69, 54!\n \n xxx = \"\"\"\n id=199\n grism_root='ibhm48'\n MAIN_OUTPUT_FILE = 'cosmos-1.v4.6'\n OUTPUT_DIRECTORY = '/Users/gbrammer/research/drg/PHOTZ/EAZY/NEWFIRM/v4.6/OUTPUT_KATE/'\n CACHE_FILE = 'Same'\n \"\"\"\n \n #### Get G141 spectrum\n if Verbose:\n print 'Read SPC'\n \n if SPC is None:\n SPC = threedhst.plotting.SPCFile(grism_root+'_2_opt.SPC.fits',\n axe_drizzle_dir='DRIZZLE_G141')\n \n spec = SPC.getSpec(id)\n if spec is False:\n return False\n \n xmin = 3000\n xmax = 2.4e4\n \n lam = spec.field('LAMBDA')\n flux = spec.field('FLUX')\n ffix = flux-spec.field('CONTAM')\n ferr = spec.field('FERROR') #*0.06/0.128254\n \n if Verbose:\n print 'Read grism catalog'\n \n #### Read the grism catalog and get coords of desired object\n if grismCat is None:\n grismCat = threedhst.sex.mySexCat('DATA/'+grism_root+'_drz.cat')\n \n #### Source size\n R = np.sqrt(np.cast[float](grismCat.A_IMAGE)*np.cast[float](grismCat.B_IMAGE))\n grism_idx = np.where(grismCat.id == id)[0][0]\n \n Rmatch = R[grism_idx]*1.\n \n #print 'R=%f\"' %(Rmatch)\n ra0 = grismCat.ra[grismCat.id == id][0]\n de0 = grismCat.dec[grismCat.id == id][0]\n \n #### Read EAZY outputs and get info for desired object\n if cat is None:\n cat = catIO.ReadASCIICat(OUTPUT_DIRECTORY+'../'+MAIN_OUTPUT_FILE+'.cat')\n \n dr = np.sqrt((cat.ra-ra0)**2*np.cos(de0/360.*2*np.pi)**2+(cat.dec-de0)**2)*3600.\n \n \n photom_idx = np.where(dr == np.min(dr))[0][0]\n \n drMatch = dr[photom_idx]*1.\n #print 'dr = %7.2f\\n' %(drMatch)\n #print drMatch, np.min(dr)\n \n if drMatch > 2:\n return False\n \n if Verbose:\n print 'Read zout'\n if zout is None: \n zout = catIO.ReadASCIICat(OUTPUT_DIRECTORY+'/'+MAIN_OUTPUT_FILE+'.zout')\n \n if fout is None:\n fout = catIO.ReadASCIICat(OUTPUT_DIRECTORY+'/../cosmos-1.m05.v4.6.fout')\n \n if Verbose:\n print 'Read binaries'\n \n lambdaz, temp_sed, lci, obs_sed, fobs, efobs = \\\n eazy.getEazySED(photom_idx, MAIN_OUTPUT_FILE=MAIN_OUTPUT_FILE, \\\n OUTPUT_DIRECTORY=OUTPUT_DIRECTORY, \\\n CACHE_FILE = CACHE_FILE)\n \n try:\n lambdaz, temp_sed_sm = unicorn.analysis.convolveWithThumb(id, lambdaz, temp_sed, SPC)\n except:\n temp_sed_sm = temp_sed*1.\n \n if Verbose: \n print 'Normalize spectrum'\n \n #### Normalize G141 spectrum\n #interp = interpol.interp1d(lambdaz, temp_sed_sm, kind='linear')\n\n q = np.where((lam > 1.08e4) & (lam < 1.68e4) & (flux > 0))[0]\n #### G102\n if lam.min() < 9000:\n q = np.where((lam > 0.8e4) & (lam < 1.13e4) & (flux > 0))[0]\n \n #### ACS G800L\n if lam.min() < 5000:\n q = np.where((lam > 0.55e4) & (lam < 1.0e4) & (flux > 0))[0]\n \n if len(q) == 0:\n return False\n\n yint = np.interp(lam[q], lambdaz, temp_sed_sm)\n \n anorm = np.sum(yint*ffix[q])/np.sum(ffix[q]**2)\n if np.isnan(anorm):\n anorm = 1.0\n total_err = np.sqrt((ferr)**2+(1.0*spec.field('CONTAM'))**2)*anorm\n \n if GET_SPEC_ONLY:\n if drMatch > 1:\n return False\n else:\n return lam, ffix*anorm, total_err, lci, fobs, efobs, photom_idx\n \n if Verbose:\n print 'Start plot'\n \n #### Make the plot\n 
threedhst.plotting.defaultPlotParameters()\n \n xs=5.8\n ys = xs/4.8*3.2\n if USE_PLOT_GUI:\n fig = plt.figure(figsize=[xs,ys],dpi=100)\n else:\n fig = Figure(figsize=[xs,ys], dpi=100)\n \n fig.subplots_adjust(wspace=0.2,hspace=0.2,left=0.13*4.8/xs, bottom=0.15*4.8/xs,right=1.-0.02*4.8/xs,top=1-0.10*4.8/xs)\n \n ax = fig.add_subplot(111)\n \n ymax = np.max((ffix[q])*anorm)\n \n if Verbose:\n print 'Make the plot'\n \n ax.plot(lambdaz, temp_sed_sm, color='red')\n # plt.errorbar(lam[q], ffix[q]*anorm, yerr=ferr[q]*anorm, color='blue', alpha=0.8)\n ax.plot(lam[q],ffix[q]*anorm, color='blue', alpha=0.2, linewidth=1)\n \n #### Show own extraction\n sp1d = threedhst.spec1d.extract1D(id, root=grism_root, path='./HTML', show=False, out2d=False)\n lam = sp1d['lam']\n flux = sp1d['flux']\n ffix = sp1d['flux']-sp1d['contam'] #-sp1d['background']\n ferr = sp1d['error']\n anorm = np.sum(yint*ffix[q])/np.sum(ffix[q]**2)\n ax.plot(lam[q],ffix[q]*anorm, color='blue', alpha=0.6, linewidth=1)\n \n #### Show photometry + eazy template\n ax.errorbar(lci, fobs, yerr=efobs, color='orange', marker='o', markersize=10, linestyle='None', alpha=0.4)\n ax.plot(lambdaz, temp_sed_sm, color='red', alpha=0.4)\n\n ax.set_ylabel(r'$f_{\\lambda}$')\n \n if plt.rcParams['text.usetex']:\n ax.set_xlabel(r'$\\lambda$ [\\AA]')\n ax.set_title('%s: \\#%d, z=%4.1f' \n %(SPC.filename.split('_2_opt')[0].replace('_','\\_'),id,\n zout.z_peak[photom_idx]))\n else:\n ax.set_xlabel(r'$\\lambda$ [$\\AA$]')\n ax.set_title('%s: #%d, z=%4.1f' \n %(SPC.filename.split('_2_opt')[0].replace('_','\\_'),id,\n zout.z_peak[photom_idx]))\n \n #kmag = 25-2.5*np.log10(cat.ktot[photom_idx])\n kmag = cat.kmag[photom_idx]\n \n ##### Labels\n label = 'ID='+r'%s K=%4.1f $\\log M$=%4.1f' %(np.int(cat.id[photom_idx]),\n kmag, fout.field('lmass')[photom_idx])\n \n ax.text(5e3,1.08*ymax, label, horizontalalignment='left',\n verticalalignment='bottom')\n \n \n label = 'R=%4.1f\"' %(drMatch)\n if drMatch > 1.1:\n label_color = 'red'\n else:\n label_color = 'black'\n ax.text(2.2e4,1.08*ymax, label, horizontalalignment='right',\n color=label_color, verticalalignment='bottom')\n \n ax.set_xlim(xmin,xmax)\n ax.set_ylim(-0.1*ymax,1.2*ymax)\n \n if Verbose:\n print 'Save the plot'\n \n if OUT_FILE_FORMAT:\n out_file = '%s_%05d_SED.png' %(grism_root, id)\n else:\n out_file = OUT_FILE\n \n if USE_PLOT_GUI:\n fig.savefig(OUT_PATH+'/'+out_file,dpi=100,transparent=False)\n plt.close()\n else:\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(OUT_PATH+'/'+out_file, dpi=100, transparent=False)\n \n print unicorn.noNewLine+OUT_PATH+'/'+out_file\n \n if Verbose:\n print 'Close the plot window'", "def get_synth_images():\n \n # code to store synthetic image data in an h5 file\n# import os\n# files = os.listdir(\"C:/Users/richardcouperthwaite/Documents/GitHub/MSEN655/classification/data/Cropped_images\")\n# print(files)\n# hf = h5py.File('SynthData.hdf5', 'w')\n# for file in files:\n# x = load_micrograph(\"C:/Users/richardcouperthwaite/Documents/GitHub/MSEN655/classification/data/Cropped_images/\"+file)\n# hf.create_dataset(file, data=x)\n \n # code to import the synthetic images from an h5 file\n hf = h5py.File('SynthData.hdf5', 'r')\n for key in hf.keys():\n x = hf.get(key)\n try:\n labels.append([key])\n x_input = np.r_[x_input, x]\n except NameError:\n labels = [[key]]\n x_input = x\n \n return labels, x_input", "def read_files(file):\n # TODO Fix search into the dictionary for increased speed\n file_path, filename = rename_to_text(file)\n print('\\n\\n')\n 
print(file_path)\n print('\\n\\n')\n # find the exposure (L)\n # langmuir.append(langmuir_determination(filename=filename))\n try:\n # read file\n file_read = pd.read_csv(file_path, sep='\\t', header=3)\n\n # remove whitespace\n column_names = [file_read.keys()[i].lstrip() for i in range(0, len(file_read.keys()))]\n # rename columns\n file_read.columns = column_names\n # drop the time column and mse=8\n # file_read = file_read.drop([column_names[0], column_names[-1]], axis=1)\n file_read = file_read.drop([column_names[0]], axis=1)\n temp = file_read[file_read != 0]\n temp = temp.dropna(axis=0)\n\n\n file_read = file_read.dropna(axis=1)\n\n # for the bug in the labview that the temperature cuts out\n temp = file_read[file_read != 0]\n file_read = temp.dropna(axis=0)\n\n # set the index to be temperature\n file_read = file_read.set_index(file_read.keys()[0])\n except IndexError:\n \"except it is a hiden mass spec file!\"\n file_read = pd.read_csv(file_path, header=29)\n file_read = file_read.dropna(axis=1)\n file_read.drop(['Time', 'ms'],axis=1, inplace=True)\n file_read.set_index('Temperature', inplace=True)\n # pseudo code...\n # pd.DataFrame(molecule_area[i], index=langmuir) and append all of them\n\n return file_read, filename", "def extractVTKfilesStratification(patient_paths):\n\n cont = 0\n\n \n raw_paths = []\n \n raw_path = []\n \n mask_paths = []\n \n mask_path = []\n\n \n for fold in patient_paths:\n \n # Access 3D VTK files\n \n patient_paths[cont] = [preprocessingType(params.prep_step) + path for path in fold]\n \n # Split in case of working with 'oth' modality (oth is a third modality present in Philips and Siemens images, apart from magnitude and phase, from \"other\" --> \"oth\")\n \n if '_oth' in params.train_with:\n \n splitting = params.train_with.split('_')\n \n primary = splitting[0] # Main modality used together with 'oth'\n \n else:\n \n primary = ''\n \n for patient_path in patient_paths[cont]: # Look for all images in the given paths\n \n if params.three_D:\n \n images = sorted(os.listdir(patient_path)) \n \n if 'both' in params.train_with: # Train with magnitude and phase (both)\n \n if params.train_with == 'bothBF' or primary == 'bothBF':\n \n ind_raw = [i for i,s in enumerate(images) if 'magBF' in s]\n \n elif params.train_with == 'both' or primary == 'both':\n \n ind_raw = [i for i,s in enumerate(images) if 'mag_' in s]\n \n else:\n \n if not('_oth' in params.train_with):\n \n ind_raw = [i for i,s in enumerate(images) if params.train_with in s]\n \n else:\n \n if primary == 'mag':\n \n ind_raw = [i for i,s in enumerate(images) if 'mag_' in s]\n \n else:\n \n ind_raw = [i for i,s in enumerate(images) if primary in s]\n \n ind_msk = [i for i,s in enumerate(images) if 'msk' in s]\n \n for ind, ind_m in zip(ind_raw,ind_msk):\n \n raw_path.append(patient_path + images[ind])\n \n mask_path.append(patient_path + images[ind_m])\n\n \n else:\n \n # Access 2D VTK files\n \n modalities = sorted(os.listdir(patient_path))\n \n for modality in modalities:\n \n modality_path = patient_path + modality + '/'\n \n images = sorted(os.listdir(modality_path))\n \n if params.train_with == 'mag_' or primary == 'mag': \n \n if modality == 'mag':\n \n raw_path.append([modality_path + item for item in images if not('sum' in item) and (not('mip' in item))])\n\n \n elif params.train_with == 'pha' or primary == 'pha':\n \n if modality == 'pha':\n \n raw_path.append([modality_path + item for item in images if (not('sum' in item)) and (not('mip' in item))])\n\n\n \n elif params.train_with == 
'magBF' or primary == 'magBF':\n \n if modality == 'magBF':\n \n raw_path.append([modality_path + item for item in images if (not('sum' in item)) and (not('mip' in item))])\n\n \n elif params.train_with == 'both' or primary == 'both':\n \n if modality == 'mag':\n \n raw_path.append([modality_path + item for item in images if (not('sum' in item)) and (not('mip' in item))])\n\n \n elif params.train_with == 'bothBF' or primary == 'bothBF':\n \n if modality == 'magBF':\n \n raw_path.append([modality_path + item for item in images if (not('sum' in item)) and (not('mip' in item))])\n\n \n if modality == 'msk':\n \n mask_path.append([modality_path + image for image in images])\n \n if not(params.three_D):\n \n mask_path = list(itertools.chain.from_iterable(mask_path)) \n \n raw_path = list(itertools.chain.from_iterable(raw_path)) \n \n \n raw_paths.append(raw_path)\n \n mask_paths.append(mask_path)\n \n raw_path = []\n \n mask_path = []\n \n cont += 1\n \n \n return raw_paths, mask_paths" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an XML representation of the Microstructure instance.
def to_xml(self, doc): root = doc.createElement('Microstructure') doc.appendChild(root) name = doc.createElement('Name') root.appendChild(name) name_text = doc.createTextNode(self.name) name.appendChild(name_text) grains = doc.createElement('Grains') root.appendChild(grains) for i, grain in enumerate(self.grains): file_name = os.path.join(self.name, '%s_%d.vtu' % (self.name, i)) grains.appendChild(grain.to_xml(doc, file_name))
[ "def xml(self):\n return oxml_tostring(self, encoding='UTF-8', standalone=True)", "def xml(self):\n return self._domain.xml", "def xml(self) -> ET.Element:\n return self.device_info.xml", "def get_xml(self):\n return etree.tostring(self.xml_tree, pretty_print=True, encoding=\"utf-8\").decode(\"utf-8\")", "def generate_xml(self):\n raise NotImplementedError()", "def generate_xml(self):\n assert self.xml_root != None, 'The self.xml_root variable must be set in your inheriting class'\n output = StringIO.StringIO()\n xd = XMLDumper(output, XML_DUMP_PRETTY | XML_STRICT_HDR)\n xd.XMLDumpKeyValue(self.xml_root, self.data.to_dict())\n output.seek(0)\n return output", "def get_raw_xml_output(self):\n\n return self.xml", "def to_etree(self):\n attribs = {\n '{{{pre}}}type'.format(pre=NAMESPACES['xsi']): self.xsi_type,\n 'namespace': self.namespace, 'name': self.name,\n 'value': self.hexvalue, 'valueString': self.value}\n non_empty_attribs = {key: val for (key, val) in attribs.items()\n if val is not None}\n E = ElementMaker()\n return E('labels', non_empty_attribs)", "def to_xml(self):\n ids = {s: i for (i, s) in enumerate(self.Q())}\n\n return '\\n'.join(\n ['<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>', '<structure><type>fa</type><automaton>'] +\n [\n '<state id=\"%d\" name=\"%s\"><x>0</x><y>0</y>%s</state>' %\n ( ids[name], name, '<initial/>' if name == self.q0 else '<final/>' if name in self.F else '' ) \n for name in self.Q()\n ] + [\n '<transition><from>%d</from><to>%d</to><read>%s</read></transition>' % \n ( ids[t[0]], ids[t[2]], t[1] ) \n for t in self.transitions\n ] + \n ['</automaton></structure>']\n )", "def print_xml(self):\r\n\t\tfrom lxml import etree\r\n\t\tfrom ..nrml.common import create_nrml_root\r\n\r\n\t\tencoding='latin1'\r\n\t\ttree = create_nrml_root(self, encoding=encoding)\r\n\t\tprint(etree.tostring(tree, xml_declaration=True, encoding=encoding,\r\n\t\t\t\t\t\t\tpretty_print=True))", "def to_rdfxml(self):\n return rdfviews.MyndighetsforeskriftDescription(self).to_rdfxml()", "def create_xml(self):\n if self.root is not None:\n root = self.root\n signs = root.find('signs')\n if signs is not None: root.remove(signs)\n else:\n root = ET.Element(\"mcbuilder\")\n\n signs = self.signs.create_xml()\n root.insert(0, signs)\n\n return XMLWriter(root).to_string()", "def dump(self):\n return etree.tostring(self.root)", "def __repr__(self):\n args = []\n args.append(\"name=%s\" % repr(self.name))\n\n if self.content != \"\":\n args.append(\"content=%s\" % repr(self.content))\n\n if self.attributes != {}:\n args.append(\"attributes=%s\" % repr(self.attributes))\n\n if self.children != []:\n args.append(\"children=%s\" % repr(self.children))\n\n clas = XMLNode\n if issubclass(self.__class__, XMLNode):\n cls = self.__class__.__name__\n\n return \"%s(%s)\" % (cls, \", \".join(args))", "def to_tree(self):\n builder = DesignBuilder(self)\n builder.generate()\n return builder.root", "def string(self):\n return etree.tostring(self.xml_tree,\n pretty_print=True,\n xml_declaration=False\n ).decode(encoding=\"utf-8\")", "def generateKML(self):\n k = kml.KML()\n k.append(self.generateDocument())\n return k.to_string(prettyprint=True)", "def create_xml(self):\n if self.disabled:\n return None\n # <Site> node\n s = self.ci.create_node()\n # <Build> node\n b = Node(\"Build\")\n s.insertNode(b)\n b.insertNode(Node(\"BuildCommand\", body=self.cmd))\n b.insertNode(Node(\"StartDateTime\", body=self.ci.encodetime(self.t0)))\n b.insertNode(Node(\"EndDateTime\", 
body=self.ci.encodetime(self.t1)))\n b.insertNode(Node(\"ElapsedMinutes\",\n body=self.ci.encodedur(self.t1-self.t0)))\n # Warnings and errors\n for w in self.warn_err:\n b.insertNode(w.create_xml())\n if self.build_log:\n oldmax = self.ci.max_log\n self.ci.max_log = -1\n b.insertNode(Node(\"Log\",\n attrs={\"Encoding\": \"base64\",\n \"Compression\": \"/bin/gzip\"},\n body=self.ci.encode_log_file(False,\n self.log_file,\n \"base64\", \"gzip\")))\n self.ci.max_log = oldmax\n\n # Delete tmp file\n if self.is_tmp_log:\n try:\n os.remove(self.log_file)\n except:\n pass\n # Done\n return s", "def toDomElement(self):\n dom = parseString('<%s></%s>' % (self.elementType, self.elementType))\n domElement = dom.documentElement\n if self.description is not None:\n domElement.setAttribute('description', self.description)\n e = dom.createTextNode(self.filename)\n domElement.appendChild(e)\n\n return domElement", "def as_xml(self):\n template = jinja2_env.get_template('episode.xml')\n\n return template.render(\n title=escape(self.title),\n url=quoteattr(self.url),\n guid=escape(self.url),\n mimetype=self.mimetype,\n length=self.length,\n date=formatdate(self.date),\n image_url=self.image,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saving the microstructure to the disk. Save the metadata as an XML file and, when available, also save the vtk representation of the grains.
def save(self): # save the microstructure instance as xml doc = Document() self.to_xml(doc) xml_file_name = '%s.xml' % self.name print('writing ' + xml_file_name) f = open(xml_file_name, 'wb') doc.writexml(f, encoding='utf-8') f.close() # now save the vtk representation if self.vtkmesh != None: import vtk vtk_file_name = '%s.vtm' % self.name print('writing ' + vtk_file_name) writer = vtk.vtkXMLMultiBlockDataWriter() writer.SetFileName(vtk_file_name) if vtk.vtkVersion().GetVTKMajorVersion() > 5: writer.SetInputData(self.vtkmesh) else: writer.SetInput(self.vtkmesh) writer.Write()
[ "def save(self) -> None:\n if self.meta.file_path:\n # We are a family root node or the user has decided to make us one\n # Save family information\n with self.meta.file_path.open('w') as of:\n of.write(self.to_json())\n\n # Now for saving language information\n # Sound changes cannot be serialized! So we can only save lexicon\n # information.\n if self.lexicon:\n self.lexicon.save(self.meta.lexicon_file_path)\n if self.lexicon_delta:\n self.lexicon_delta.save(self.meta.lexicon_delta_file_path)", "def save(self):\n from vtk import vtkXMLPolyDataWriter\n\n writer = vtkXMLPolyDataWriter()\n writer.SetDataModeToAscii()\n writer.SetFileName('mmviewer_save.vtp')\n\n for polydata in self.polydatas:\n if vtk.VTK_MAJOR_VERSION <= 5:\n writer.SetInput(polydata)\n else:\n writer.SetInputData(polydata)\n writer.Write()\n\n print((\"File 'mmviewer_save.vtp' written in %s\" % getcwd()))\n return", "def metadata_save(self):\n # Serialize. We take care to pretty-print this so it can (sometimes) be\n # parsed by simple things like grep and sed.\n out = json.dumps(self.metadata, indent=2, sort_keys=True)\n DEBUG(\"metadata:\\n%s\" % out)\n # Main metadata file.\n path = self.metadata_path // \"metadata.json\"\n VERBOSE(\"writing metadata file: %s\" % path)\n file_write(path, out + \"\\n\")\n # /ch/environment\n path = self.metadata_path // \"environment\"\n VERBOSE(\"writing environment file: %s\" % path)\n file_write(path, ( \"\\n\".join(\"%s=%s\" % (k,v) for (k,v)\n in sorted(self.metadata[\"env\"].items()))\n + \"\\n\"))\n # mkdir volumes\n VERBOSE(\"ensuring volume directories exist\")\n for path in self.metadata[\"volumes\"]:\n mkdirs(self.unpack_path // path)", "def save(self):\n idevicesDir = self.config.configDir/'idevices'\n if not idevicesDir.exists():\n idevicesDir.mkdir()\n fileOut = open(idevicesDir/'generic.data', 'wb')\n fileOut.write(persist.encodeObject(self.generic))", "def _save(self):\n with open(self.metadata_file, 'w') as f:\n f.write(json.dumps(self._metadata, indent=2))", "def save_meta(self) -> None:\n path = f\"{self._path}.json\"\n # pylint: disable=protected-access\n Abstract._save(path, json.dumps(self._exp._meta, indent=4))", "def write_meta(self):\n\t\t#raise NotImplementedError\n\t\tpath = os.path.join(self.get_private_dir(create=True), \"meta.yaml\")\n\t\tunits = {key:str(value) for key, value in self.units.items()}\n\t\tmeta_info = dict(description=self.description,\n\t\t\t\t\t\t ucds=self.ucds, units=units, descriptions=self.descriptions,\n\t\t\t\t\t\t )\n\t\tvaex.utils.write_json_or_yaml(path, meta_info)", "def save(self):\r\n with open(self._filename, 'w') as f:\r\n pytoml.dump(f, self._collapse(self._toml))", "def save(self, filename='test', ext='obj'):\n\t\tdeselect_all()\n\t\tself.building.select_set(True)\n\t\tbpy.ops.export_scene.obj(filepath=filename, use_selection=True)", "def SaveVTK(self, outfile=\"Output\"):\n writer = vtk.vtkXMLUnstructuredGridWriter()\n writer.SetFileName(outfile + \".vtu\")\n writer.SetInput(self.mesh)\n writer.Write()", "def saveDocument(self, path):\n tmpPath = path.replace('.idml', '.tmp')\n if os.path.exists(tmpPath):\n shutil.rmtree(tmpPath)\n os.mkdir(tmpPath)\n\n zf = zipfile.ZipFile(tmpPath + '.idml', mode='w') # Open export as Zip.\n\n filePath = '/mimetype'\n f = codecs.open(tmpPath + filePath, 'w', encoding='utf-8')\n f.write('application/vnd.adobe.indesign-idml-package')\n f.close()\n zf.write(tmpPath + filePath, arcname=filePath)\n\n #shutil.copy('../../Test/MagentaYellowRectangle/designmap.xml', tmpPath + '/designmap.xml')\n 
\n filePath = '/designmap.xml'\n f = codecs.open(tmpPath + filePath, 'w', encoding='utf-8')\n self.designMap.writePreXml(f)\n self.designMap.writeXml(f)\n f.close()\n zf.write(tmpPath + filePath, arcname=filePath)\n \n os.mkdir(tmpPath + '/META-INF')\n\n for infoName in self.metaInfo.keys():\n filePath = '/META-INF/%s.xml' % infoName\n f = codecs.open(tmpPath + filePath, 'w', encoding='utf-8')\n self.metaInfo[infoName].writePreXml(f)\n self.metaInfo[infoName].writeXml(f)\n f.close()\n zf.write(tmpPath + filePath, arcname=filePath)\n\n os.mkdir(tmpPath + '/XML')\n\n for fileName in ('Tags', 'BackingStory'):\n filePath = '/XML/%s.xml' % fileName\n f = codecs.open(tmpPath + filePath, 'w', encoding='utf-8')\n if fileName in self.xmlNodes:\n self.xmlNodes[fileName].writePreXml(f)\n self.xmlNodes[fileName].writeXml(f)\n f.close()\n zf.write(tmpPath + filePath, arcname=filePath)\n\n os.mkdir(tmpPath + '/Spreads')\n\n #shutil.copy('../../Test/MagentaYellowRectangle/Spreads/Spread_udc.xml', tmpPath + '/Spreads/Spread_udc.xml')\n for spread in self.spreads:\n filePath = '/' + spread.fileName\n f = codecs.open(tmpPath + filePath, 'w', encoding='utf-8')\n spread.writePreXml(f)\n spread.writeXml(f)\n f.close()\n zf.write(tmpPath + filePath, arcname=filePath)\n \n os.mkdir(tmpPath + '/MasterSpreads')\n\n for masterSpread in self.masterSpreads:\n filePath = '/' + masterSpread.fileName\n f = codecs.open(tmpPath + filePath, 'w', encoding='utf-8')\n masterSpread.writePreXml(f)\n masterSpread.writeXml(f)\n f.close()\n zf.write(tmpPath + filePath, arcname=filePath)\n\n os.mkdir(tmpPath + '/Resources')\n\n for fileName in ('Fonts', 'Graphic', 'Preferences', 'Styles'):\n filePath = '/Resources/%s.xml' % fileName\n f = codecs.open(tmpPath + filePath, 'w', encoding='utf-8')\n if fileName in self.resources:\n self.resources[fileName].writePreXml(f)\n self.resources[fileName].writeXml(f)\n f.close()\n zf.write(tmpPath + filePath, arcname=filePath)\n\n os.mkdir(tmpPath + '/Stories')\n\n for story in self.stories:\n filePath = '/' + story.fileName\n f = codecs.open(tmpPath + filePath, 'w', encoding='utf-8')\n story.writePreXml(f)\n story.writeXml(f)\n f.close()\n zf.write(tmpPath + filePath, arcname=filePath)\n zf.close()", "def escribir(self):\n tree.write('metadata1.xml')\n bs = BeautifulSoup(open('metadata1.xml'), 'xml')\n archivo1 = open('metadata1.xml', \"w+\")\n archivo1.write(bs.prettify())", "def test_save_uml(self):\n self.element_factory.create(UML.Package)\n self.element_factory.create(UML.Diagram)\n self.element_factory.create(UML.Comment)\n self.element_factory.create(UML.Class)\n\n out = PseudoFile()\n storage.save(XMLWriter(out), factory=self.element_factory)\n out.close()\n\n assert \"<Package \" in out.data\n assert \"<Diagram \" in out.data\n assert \"<Comment \" in out.data\n assert \"<Class \" in out.data", "def save(self): \r\n dataPath, metaPath, zipPath = self.expPath() \r\n self.save_as(self.data, dataPath) \r\n self.save_as(self.metadata, metaPath)\r\n # zip(dataPath, metaPath, zipPath)\r\n # os.remove(dataPath)\r\n # os.remove(metaPath)\r\n return", "def save(self):\n # Create needed subfolder if not yet exist\n get_subfolder('population/storage/', f'{self.folder_name}')\n get_subfolder(f'population/storage/{self.folder_name}/', f'{self}')\n get_subfolder(f'population/storage/{self.folder_name}/{self}/', 'generations')\n \n # Save the population\n store_pickle(self, f'population/storage/{self.folder_name}/{self}/generations/gen_{self.generation:05d}')\n self.log(f\"Population '{self}' saved! 
Current generation: {self.generation}\")", "def save(self):\n self.path.write_text(toml.dumps(self.tomldoc))", "def save(self, single_pass=False):\n\n if not self.meta_root:\n # Assume this is in-memory only.\n return\n\n # Ensure content is loaded before attempting save.\n self.load()\n\n CatalogPartBase.save(self, self.__data, single_pass=single_pass)", "def save(self):\n weights_filepath = '{}_weights.buf'.format(os.path.splitext(self.weights_hdf5_filepath)[0])\n with open(weights_filepath, mode='wb') as f:\n f.write(self.weights)\n metadata_filepath = '{}_metadata.json'.format(os.path.splitext(self.weights_hdf5_filepath)[0])\n with open(metadata_filepath, mode='w') as f:\n json.dump(self.metadata, f)", "def saveFile(self):\n \n base_map.save(\"meteo.html\")", "def save(self, file_name=None):\n if file_name is None:\n assert self.file_name is not None, \"You must specify a file name to write the experiment to!\"\n file_name = self.file_name\n\n #reset any relationships if they have been modified\n for block in self.blocks:\n block.create_many_to_one_relationship(force=True, recursive=True)\n\n if os.path.exists(file_name):\n #delete the old file\n os.remove(file_name)\n\n #write a new file\n of = NeoHdf5IO(file_name)\n self.blocks.sort(key=operator.attrgetter(\"name\"))\n of.write_all_blocks(self.blocks)\n of.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge two `Microstructure` instances together. The function works for two microstructures with grain maps and an overlap between them. Temporary `Microstructures` restricted to the overlap regions are created and grains are matched between the two based on a disorientation tolerance.
def merge_microstructures(micros, overlap, plot=False): from scipy import ndimage # perform some sanity checks for i in range(2): if not hasattr(micros[i], 'grain_map'): raise ValueError('microstructure instance %s must have an associated grain_map attribute' % micros[i].name) if micros[0].get_lattice() != micros[1].get_lattice(): raise ValueError('both microstructure must have the same crystal lattice') lattice = micros[0].get_lattice() if micros[0].voxel_size != micros[1].voxel_size: raise ValueError('both microstructure must have the same voxel size') voxel_size = micros[0].voxel_size # create two microstructure of the overlapping regions: end slices in first scan and first slices in second scan grain_ids_ol1 = micros[0].grain_map[:, :, micros[0].grain_map.shape[2] - overlap:] grain_ids_ol2 = micros[1].grain_map[:, :, :overlap] dims_ol1 = np.array(grain_ids_ol1.shape) print(dims_ol1) dims_ol2 = np.array(grain_ids_ol2.shape) print(dims_ol2) # build a microstructure for the overlap region in each volumes grain_ids_ols = [grain_ids_ol1, grain_ids_ol2] micros_ol = [] for i in range(2): grain_ids_ol = grain_ids_ols[i] ids_ol = np.unique(grain_ids_ol) print(ids_ol) # difference due to the crop (restricting the grain map to the overlap region) #offset_mm = (2 * i - 1) * voxel_size * np.array([0., 0., grain_ids_ol.shape[2] - 0.5 * micros[i].grain_map.shape[2]]) # here we use an ad-hoc offset to voxel (0, 0, 0) in the full volume: offset is zero for the second volume offset_px = (i - 1) * np.array([0., 0., grain_ids_ol.shape[2] - micros[i].grain_map.shape[2]]) offset_mm = voxel_size * offset_px print('offset [px] is {}'.format(offset_px)) print('offset [mm] is {}'.format(offset_mm)) # make the microstructure micro_ol = Microstructure(name='%sol_' % micros[i].name) print('* building overlap microstructure %s' % micro_ol.name) micro_ol.set_lattice(lattice) micro_ol.grain_map = grain_ids_ol for gid in ids_ol: if gid < 1: print('skipping %d' % gid) continue g = Grain(gid, micros[i].get_grain(gid).orientation) array_bin = (grain_ids_ol == gid).astype(np.uint8) local_com = ndimage.measurements.center_of_mass(array_bin, grain_ids_ol) com_px = (local_com + offset_px - 0.5 * np.array(micros[i].grain_map.shape)) com_mm = voxel_size * com_px print('grain %2d center: %6.3f, %6.3f, %6.3f' % (gid, com_mm[0], com_mm[1], com_mm[2])) g.center = com_mm micro_ol.grains.append(g) #TODO recalculate position as we look at a truncated volume ''' micro_ol.recompute_grain_centers(verbose=True) for g in micro_ol.grains: g.center += offset_mm ''' # add the overlap microstructure to the list micros_ol.append(micro_ol) # match grain from micros_ol[1] to micros_ol[0] (the reference) matched, _, unmatched = micros_ol[0].match_grains(micros_ol[1], verbose=True) # the affine transform does not since to work, using a simpler method here delta_avg = np.zeros(3) for i in range(len(matched)): # look at the pair of grains match = matched[i] delta = micros_ol[0].get_grain(match[0]).center - micros_ol[1].get_grain(match[1]).center delta_avg += delta delta_avg /= len(matched) print('average shift (pixels):') print(delta_avg / voxel_size) translation = delta_avg translation_voxel = (delta_avg / voxel_size).astype(int) translation_voxel[0] += 2 print('translation is in mm: {}'.format(translation)) print('translation is in voxels {}'.format(translation_voxel)) """ from pymicro.view.vol_utils import compute_affine_transform # compute the affine transform n_points = len(matched) fixed = np.zeros((n_points, 3)) moving = np.zeros((n_points, 
3)) moved = np.zeros_like(moving) # markers in ref grain map for i in range(n_points): fixed[i] = micros_ol[0].get_grain(matched[i][0]).center moving[i] = micros_ol[1].get_grain(matched[i][1]).center # call the registration method translation, transformation = compute_affine_transform(fixed, moving) invt = np.linalg.inv(transformation) # check what are now the points after transformation fixed_centroid = np.average(fixed, axis=0) moving_centroid = np.average(moving, axis=0) print('fixed centroid: {}'.format(fixed_centroid)) print('moving centroid: {}'.format(moving_centroid)) for j in range(n_points): moved[j] = fixed_centroid + np.dot(transformation, moving[j] - moving_centroid) print('point %d will move to (%6.3f, %6.3f, %6.3f) to be compared with (%6.3f, %6.3f, %6.3f)' % ( j, moved[j, 0], moved[j, 1], moved[j, 2], fixed[j, 0], fixed[j, 1], fixed[j, 2])) print('transformation is:') print(invt) # offset and translation, here we only look for rigid body translation offset = -np.dot(invt, translation) print(translation, offset) translation_voxel = (translation / voxel_size).astype(int) """ print(translation_voxel) # look at ids in the reference volume ids_ref = np.unique(micros[0].grain_map) ids_ref_list = ids_ref.tolist() if -1 in ids_ref_list: ids_ref_list.remove(-1) # grain overlap if 0 in ids_ref_list: ids_ref_list.remove(0) # background print(ids_ref_list) id_offset = max(ids_ref_list) print('grain ids in volume %s will be offset by %d' % (micros[1].name, id_offset)) # gather ids in the merging volume (will be modified) ids_mrg = np.unique(micros[1].grain_map) ids_mrg_list = ids_mrg.tolist() if -1 in ids_mrg_list: ids_mrg_list.remove(-1) # grain overlap if 0 in ids_mrg_list: ids_mrg_list.remove(0) # background print(ids_mrg_list) # prepare a volume with the same size as the second grain map, with grain ids renumbered and (X, Y) translations applied. 
grain_map_translated = micros[1].grain_map.copy() print('renumbering grains in the overlap region of volume %s' % micros[1].name) for match in matched: ref_id, other_id = match print('replacing %d by %d' % (other_id, ref_id)) #TODO should flag those grains so their center can be recomputed grain_map_translated[micros[1].grain_map == other_id] = ref_id try: ids_mrg_list.remove(other_id) except ValueError: # this can happend if a grain in reference volume was matched to more than 1 grain print('%d was not in list anymore' % other_id) # also renumber the rest using the offset renumbered_grains = [] for i, other_id in enumerate(ids_mrg_list): new_id = id_offset + i + 1 grain_map_translated[micros[1].grain_map == other_id] = new_id print('replacing %d by %d' % (other_id, new_id)) renumbered_grains.append([other_id, new_id]) # apply translation along the (X, Y) axes grain_map_translated = np.roll(grain_map_translated, translation_voxel[:2], (0, 1)) check = overlap // 2 print(grain_map_translated.shape) print(overlap) print(translation_voxel[2] + check) if plot: fig = plt.figure(figsize=(15, 7)) ax1 = fig.add_subplot(1, 3, 1) ax1.imshow(micros[0].grain_map[:, :, translation_voxel[2] + check].T, vmin=0) plt.axis('off') plt.title('micros[0].grain_map (ref)') ax2 = fig.add_subplot(1, 3, 2) ax2.imshow(grain_map_translated[:, :, check].T, vmin=0) plt.axis('off') plt.title('micros[1].grain_map (renumbered)') ax3 = fig.add_subplot(1, 3, 3) same_voxel = micros[0].grain_map[:, :, translation_voxel[2] + check] == grain_map_translated[:, :, check] print(same_voxel) #print(same_voxel.shape) #ax3.imshow(same_voxel.T, vmin=0, vmax=2) plt.axis('off') plt.title('voxels that are identicals') plt.savefig('merging_check1.pdf') # start the merging: the first volume is the reference overlap = micros[0].grain_map.shape[2] - translation_voxel[2] print('overlap is %d voxels' % overlap) z_shape = micros[0].grain_map.shape[2] + micros[1].grain_map.shape[2] - overlap print('vertical size will be: %d + %d + %d = %d' % ( micros[0].grain_map.shape[2] - overlap, overlap, micros[1].grain_map.shape[2] - overlap, z_shape)) shape_merged = np.array(micros[0].grain_map.shape) + [0, 0, micros[1].grain_map.shape[2] - overlap] print('initializing volume with shape {}'.format(shape_merged)) grain_ids_merged = np.zeros(shape_merged, dtype=np.int16) print(micros[0].grain_map.shape) print(micros[1].grain_map.shape) # add the non-overlapping part of the 2 volumes as is grain_ids_merged[:, :, :micros[0].grain_map.shape[2] - overlap] = micros[0].grain_map[:, :, :-overlap] grain_ids_merged[:, :, micros[0].grain_map.shape[2]:] = grain_map_translated[:, :, overlap:] # look at vertices with the same label print(micros[0].grain_map[:, :, translation_voxel[2]:].shape) print(grain_map_translated[:, :, :overlap].shape) print('translation_voxel[2] = %d' % translation_voxel[2]) print('micros[0].grain_map.shape[2] - overlap = %d' % (micros[0].grain_map.shape[2] - overlap)) same_voxel = micros[0].grain_map[:, :, translation_voxel[2]:] == grain_map_translated[:, :, :overlap] print(same_voxel.shape) grain_ids_merged[:, :, translation_voxel[2]:micros[0].grain_map.shape[2]] = grain_map_translated[:, :, :overlap] * same_voxel # look at vertices with a single label single_voxels_0 = (micros[0].grain_map[:, :, translation_voxel[2]:] > 0) & (grain_map_translated[:, :, :overlap] == 0) print(single_voxels_0.shape) grain_ids_merged[:, :, translation_voxel[2]:micros[0].grain_map.shape[2]] += micros[0].grain_map[:, :, translation_voxel[2]:] * single_voxels_0 
single_voxels_1 = (grain_map_translated[:, :, :overlap] > 0) & (micros[0].grain_map[:, :, translation_voxel[2]:] == 0) print(single_voxels_1.shape) grain_ids_merged[:, :, translation_voxel[2]:micros[0].grain_map.shape[2]] += grain_map_translated[:, :, :overlap] * single_voxels_1 if plot: fig = plt.figure(figsize=(14, 10)) ax1 = fig.add_subplot(1, 2, 1) ax1.imshow(grain_ids_merged[:, 320, :].T) plt.axis('off') plt.title('XZ slice') ax2 = fig.add_subplot(1, 2, 2) ax2.imshow(grain_ids_merged[320, :, :].T) plt.axis('off') plt.title('YZ slice') plt.savefig('merging_check2.pdf') if hasattr(micros[0], 'mask') and hasattr(micros[1], 'mask'): mask_translated = np.roll(micros[1].mask, translation_voxel[:2], (0, 1)) # merging the masks mask_merged = np.zeros(shape_merged, dtype=np.uint8) # add the non-overlapping part of the 2 volumes as is mask_merged[:, :, :micros[0].mask.shape[2] - overlap] = micros[0].mask[:, :, :-overlap] mask_merged[:, :, micros[0].grain_map.shape[2]:] = mask_translated[:, :, overlap:] # look at vertices with the same label same_voxel = micros[0].mask[:, :, translation_voxel[2]:] == mask_translated[:, :, :overlap] print(same_voxel.shape) mask_merged[:, :, translation_voxel[2]:micros[0].mask.shape[2]] = mask_translated[:, :, :overlap] * same_voxel # look at vertices with a single label single_voxels_0 = (micros[0].mask[:, :, translation_voxel[2]:] > 0) & (mask_translated[:, :, :overlap] == 0) mask_merged[:, :, translation_voxel[2]:micros[0].mask.shape[2]] += ( micros[0].mask[:, :, translation_voxel[2]:] * single_voxels_0).astype(np.uint8) single_voxels_1 = (mask_translated[:, :, :overlap] > 0) & (micros[0].mask[:, :, translation_voxel[2]:] == 0) mask_merged[:, :, translation_voxel[2]:micros[0].mask.shape[2]] += ( mask_translated[:, :, :overlap] * single_voxels_1).astype(np.uint8) if plot: fig = plt.figure(figsize=(14, 10)) ax1 = fig.add_subplot(1, 2, 1) ax1.imshow(mask_merged[:, 320, :].T) plt.axis('off') plt.title('XZ slice') ax2 = fig.add_subplot(1, 2, 2) ax2.imshow(mask_merged[320, :, :].T) plt.axis('off') plt.title('YZ slice') plt.savefig('merging_check3.pdf') # merging finished, build the new microstructure instance merged_micro = Microstructure(name='%s-%s' % (micros[0].name, micros[1].name)) merged_micro.set_lattice(lattice) # add all grains from the reference volume merged_micro.grains = micros[0].grains #TODO recompute center of masses of grains in the overlap region print(renumbered_grains) # add all new grains from the merged volume for i in range(len(renumbered_grains)): other_id, new_id = renumbered_grains[i] g = micros[1].get_grain(other_id) new_g = Grain(new_id, Orientation.from_rodrigues(g.orientation.rod)) new_g.center = g.center print('adding grain with new id %d (was %d)' % (new_id, other_id)) merged_micro.grains.append(new_g) print('%d grains in merged microstructure' % merged_micro.get_number_of_grains()) # add the full grain map merged_micro.grain_map = grain_ids_merged if hasattr(micros[0], 'mask') and hasattr(micros[1], 'mask'): merged_micro.mask = mask_merged return merged_micro
[ "def union(self, other, temporal_iou_threshold=0.5, spatial_iou_threshold=0.6, strict=True, overlap='average', percentilecover=0.8, percentilesamples=100, activity=True, track=True):\n assert overlap in ['average', 'replace', 'keep'], \"Invalid input - 'overlap' must be in [average, replace, keep]\"\n assert spatial_iou_threshold >= 0 and spatial_iou_threshold <= 1, \"invalid spatial_iou_threshold, must be between [0,1]\"\n assert temporal_iou_threshold >= 0 and temporal_iou_threshold <= 1, \"invalid temporal_iou_threshold, must be between [0,1]\" \n assert percentilesamples >= 1, \"invalid samples, must be >= 1\"\n if not activity and not track:\n return self # nothing to do\n\n sc = self.clone() # do not change self yet, make a copy then merge at the end\n for o in tolist(other):\n assert isinstance(o, Scene), \"Invalid input - must be vipy.video.Scene() object and not type=%s\" % str(type(o))\n\n if strict:\n assert sc.filename() == o.filename(), \"Invalid input - Scenes must have the same underlying video. Disable this with strict=False.\"\n oc = o.clone() # do not change other, make a copy\n\n # Key collision?\n if len(set(sc.tracks().keys()).intersection(set(oc.tracks().keys()))) > 0:\n print('[vipy.video.union]: track key collision - Rekeying other... Use other.rekey() to suppress this warning.')\n oc.rekey()\n if len(set(sc.activities().keys()).intersection(set(oc.activities().keys()))) > 0:\n print('[vipy.video.union]: activity key collision - Rekeying other... Use other.rekey() to suppress this warning.') \n oc.rekey()\n\n # Similarity transform? Other may differ from self by a temporal scale (framerate), temporal translation (clip) or spatial isotropic scale (rescale)\n assert np.isclose(sc.aspect_ratio(), oc.aspect_ratio(), atol=1E-2), \"Invalid input - Scenes must have the same aspect ratio\"\n if sc.width() != oc.width():\n oc = oc.rescale(sc.width() / oc.width()) # match spatial scale\n if not np.isclose(sc.framerate(), oc.framerate(), atol=1E-3):\n oc = oc.framerate(sc.framerate()) # match temporal scale (video in oc will not match, only annotations)\n if sc.startframe() != oc.startframe():\n dt = (oc.startframe() if oc.startframe() is not None else 0) - (sc.startframe() if sc.startframe() is not None else 0)\n oc = oc.trackmap(lambda t: t.offset(dt=dt)).activitymap(lambda a: a.offset(dt=dt)) # match temporal translation of tracks and activities\n oc = oc.trackfilter(lambda t: ((not t.isdegenerate()) and len(t)>0), activitytrack=False) \n\n # Merge other tracks into selfclone: one-to-many mapping from self to other\n merged = {} # dictionary mapping trackid in other to the trackid in self, each track in other can be merged at most once\n for ti in sorted(sc.tracklist(), key=lambda t: len(t), reverse=True): # longest to shortest\n for tj in sorted(oc.tracklist(), key=lambda t: len(t), reverse=True): \n if ti.category() == tj.category() and (tj.id() not in merged) and tj.segment_percentilecover(sc.track(ti.id()), percentile=percentilecover, samples=percentilesamples) > spatial_iou_threshold: # mean framewise overlap during overlapping segment of two tracks\n sc.tracks()[ti.id()] = sc.track(ti.id()).union(tj, overlap=overlap) # merge duplicate/fragmented tracks from other into self, union() returns clone\n merged[tj.id()] = ti.id() \n print('[vipy.video.union]: merging track \"%s\"(id=%s) + \"%s\"(id=%s) for scene \"%s\"' % (str(ti), str(ti.id()), str(tj), str(tj.id()), str(sc))) \n oc.trackfilter(lambda t: t.id() not in merged, activitytrack=False) # remove duplicate other 
track for final union\n\n # Merge other activities into selfclone: one-to-one mapping\n for (i,j) in merged.items(): # i=id of other, j=id of self\n oc.activitymap(lambda a: a.replaceid(i, j) if a.hastrack(i) else a) # update track IDs referenced in activities for merged tracks\n for (i,ai) in sc.activities().items():\n for (j,aj) in oc.activities().items():\n if ai.category() == aj.category() and set(ai.trackids()) == set(aj.trackids()) and ai.temporal_iou(aj) > temporal_iou_threshold:\n oc.activityfilter(lambda a: a.id() != j) # remove duplicate activity from final union\n oc.activityfilter(lambda a: len(a.tracks())>0) # remove empty activities not merged\n\n # Union\n sc.tracks().update(oc.tracks())\n sc.activities().update(oc.activities())\n\n # Final union of unique tracks/activities\n if track:\n self.tracks(sc.tracklist()) # union of tracks only\n if activity:\n self.activities(sc.activitylist()) # union of activities only: may reference tracks not in self of track=False\n return self", "def construct_overlaps(self):\n overlapped_obstacles = [self.obstacles[0]]\n union = self.obstacles[0] # the union of all obstacles\n\n for obstacle in self.obstacles[1:]:\n # prepare obstacle and determine any overlaps\n # remember the cover -- geometric operations create new polygons\n # without them\n cover = obstacle.cover\n obstacle &= self.field # trim to self.field\n prepped = prep(obstacle) # prepping speeds up the test\n overlapped = [o for o in overlapped_obstacles\n if prepped.intersects(o)]\n\n for o_o in overlapped:\n # first remove the overlapped obstacle, then re-add, piece by\n # overlapped piece\n overlapped_obstacles.remove(o_o)\n unoverlapped = o_o - obstacle\n overlapped = o_o & obstacle\n\n # re-add the unoverlapped parts, but only polygons, not lines\n if type(unoverlapped) is Polygon:\n unoverlapped.cover = o_o.cover\n overlapped_obstacles.append(unoverlapped)\n elif type(unoverlapped) in [MultiPolygon, GeometryCollection]:\n for u in unoverlapped:\n if type(u) is Polygon:\n u.cover = o_o.cover\n overlapped_obstacles.append(u)\n\n # add the overlapped section(s), labeling them as covered by\n # both obstacles\n if type(overlapped) is Polygon:\n overlapped.cover = o_o.cover | cover\n overlapped_obstacles.append(overlapped)\n elif type(overlapped) in [MultiPolygon, GeometryCollection]:\n for o in overlapped:\n if type(o) is Polygon:\n o.cover = o_o.cover | cover\n overlapped_obstacles.append(o)\n\n # any part of the obstacle that's not overlapped another can be\n # added now\n new_obstacles = obstacle - union\n\n if type(new_obstacles) == Polygon:\n new_obstacles.cover = cover\n overlapped_obstacles.append(new_obstacles)\n elif type(new_obstacles) in [MultiPolygon, GeometryCollection]:\n for n in new_obstacles:\n if type(n) == Polygon:\n n.cover = cover\n overlapped_obstacles.append(n)\n union |= obstacle # update the union to contain all obstacles\n\n self.overlapped_obstacles = overlapped_obstacles\n self._current = True", "def get_structure(\n self,\n ) -> Structure:\n grain_0 = self.grain_0.get_structure(reconstruction=self.reconstruction)\n grain_1 = self.grain_1.get_structure(reconstruction=self.reconstruction)\n coords_0 = self.lattice.get_cartesian_coords(grain_0.frac_coords)\n coords_0[:, 2] = grain_0.cart_coords[:, 2]\n coords_1 = self.lattice.get_cartesian_coords(grain_1.frac_coords)\n coords_1[:, 2] = grain_1.cart_coords[:, 2]\n coords_1 = np.add(coords_1, self.grain_offset)\n site_properties = {\n k: np.concatenate([v, grain_1.site_properties[k]])\n for k, v in 
grain_0.site_properties.items()\n if k in grain_1.site_properties\n }\n site_properties[\"grain\"] = np.concatenate(\n [np.repeat(0, len(grain_0)), np.repeat(1, len(grain_1))]\n )\n grain_boundary = Structure(\n self.lattice,\n np.concatenate([grain_0.species, grain_1.species]),\n np.concatenate([coords_0, coords_1]),\n to_unit_cell=True,\n coords_are_cartesian=True,\n site_properties=site_properties,\n )\n if self.merge_tol is not None:\n grain_boundary.merge_sites(tol=self.merge_tol, mode=\"delete\")\n return grain_boundary", "def test_disjoint_union(self):\n a = bounding_box.BoundingBox(\n bounding_box.Point(5, 10), bounding_box.Size(5, 20)\n )\n b = bounding_box.BoundingBox(\n bounding_box.Point(15, 5), bounding_box.Size(15, 20)\n )\n self.assertEqual(bounding_box.calculate_union(a, b), 400)", "def crop(self, x_start, x_end, y_start, y_end, z_start, z_end):\n micro_crop = Microstructure()\n micro_crop.name = self.name + '_crop'\n print('cropping microstructure to %s' % micro_crop.name)\n micro_crop.grain_map = self.grain_map[x_start:x_end, y_start:y_end, z_start:z_end]\n if hasattr(self, 'mask'):\n micro_crop.mask = self.mask[x_start:x_end, y_start:y_end, z_start:z_end]\n grain_ids = np.unique(micro_crop.grain_map)\n for gid in grain_ids:\n if not gid > 0:\n continue\n micro_crop.grains.append(self.get_grain(gid))\n print('%d grains in cropped microstructure' % len(micro_crop.grains))\n return micro_crop", "def merge(cls, w1, w2):\n if not cls.is_metadata_equal(w1, w2):\n raise AssertionError(\"Metadata conflicts between worms to be \"\n \"merged.\")\n\n w1c = w1.to_canon\n w2c = w2.to_canon\n\n for worm_id in w2c.worm_ids:\n if worm_id in w1c.worm_ids:\n try:\n # Try to upsert w2c's data into w1c. If we cannot\n # without an error being raised, the data clashes.\n w1c._data[worm_id] = df_upsert(w1c._data[worm_id],\n w2c._data[worm_id])\n except AssertionError as err:\n raise AssertionError(\"Data conflicts between worms to \"\n \"be merged on worm {0}: {1}\"\n .format(str(worm_id), err))\n else:\n # The worm isn't in the 1st group, so just add it\n w1c._data[worm_id] = w2c._data[worm_id]\n\n # Sort w1c's list of worms\n w1c._data = sort_odict(w1c._data)\n\n # Create a fresh WCONWorms object to reset all the lazily-evaluated\n # properties that may change, such as num_worms, in the merged worm\n merged_worm = WCONWorms()\n merged_worm._data = w1c._data\n merged_worm.metadata = w2c.metadata\n merged_worm.units = w1c.units\n\n return merged_worm", "def merge_overlap_regions(\n histone_overlap_files,\n out_master_bed,\n # end here\n\n args, histone, method=\"atac_midpoint\", filter_w_overlap=True):\n # set up naive overlap based master regions\n logging.info(\"HISTONE: {}: Generating master regions...\".format(histone))\n args.chipseq[\"histones\"][histone][\"overlap_master_regions\"] = \"{0}/ggr.{1}.overlap.master.bed.gz\".format(\n args.folders[\"data_dir\"], histone)\n if not os.path.isfile(args.chipseq[\"histones\"][histone][\"overlap_master_regions\"]):\n histone_overlap_files = sorted(\n glob.glob(\"{0}/{1}\".format(\n args.chipseq[\"data_dir\"], args.chipseq[\"histones\"][histone][\"overlap_glob\"])))\n logging.info(\"Master regions using: {}\".format(\" \".join(histone_overlap_files)))\n merge_regions(histone_overlap_files, args.chipseq[\"histones\"][histone][\"overlap_master_regions\"])\n \n if method == \"atac_midpoint\":\n # If centered on ATAC region midpoint, then extract the midpoint and then extend out with bedtools slop\n args.atac[\"master_slop_bed\"] = 
\"{}.slop_{}bp.bed.gz\".format(\n args.atac[\"master_bed\"].split(\".bed\")[0],\n args.params[\"histones\"][histone][\"overlap_extend_len\"])\n # this bit actually belongs in ATAC? integrative?\n if not os.path.isfile(args.atac[\"master_slop_bed\"]):\n slop_bed = (\n \"zcat {0} | \"\n \"awk -F '\\t' 'BEGIN{{OFS=\\\"\\t\\\"}} \"\n \"{{ midpoint=$2+int(($3-$2)/2); \"\n \"$2=midpoint; $3=midpoint+1; print }}' | \"\n \"bedtools slop -i stdin -g {1} -b {2} | \"\n \"gzip -c > {3}\").format(\n args.atac[\"master_bed\"],\n args.annot[\"chromsizes\"],\n args.params[\"histones\"][histone][\"overlap_extend_len\"],\n args.atac[\"master_slop_bed\"])\n print slop_bed\n run_shell_cmd(slop_bed)\n\n if filter_w_overlap:\n # now intersect ATAC with the naive overlap files and only keep region if has an overlap\n args.chipseq[\"histones\"][histone][\"master_slop_marked_bed\"] = \"{}.{}-marked.bed.gz\".format(\n args.atac[\"master_slop_bed\"].split(\".bed\")[0],\n histone)\n if not os.path.isfile(args.chipseq[\"histones\"][histone][\"master_slop_marked_bed\"]):\n keep_marked = (\n \"bedtools intersect -u -a {0} -b {1} | \"\n \"gzip -c > {2}\").format(\n args.atac[\"master_slop_bed\"],\n args.chipseq[\"histones\"][histone][\"overlap_master_regions\"],\n args.chipseq[\"histones\"][histone][\"master_slop_marked_bed\"])\n print keep_marked\n run_shell_cmd(keep_marked)\n master_regions = args.chipseq[\"histones\"][histone][\"master_slop_marked_bed\"]\n \n else:\n master_regions = args.atac[\"master_slop_bed\"]\n\n elif method == \"naive_overlap\":\n # If naive overlap, don't do anything extra - already generated the master file\n master_regions = args.chipseq[\"histones\"][histone][\"overlap_master_regions\"]\n\n else:\n raise Exception(\"non existent master regions method!\")\n \n return master_regions", "def pickle_union(pickle_1, pickle_2, output_path):\n logging.info('pickle_union()')\n with open(pickle_1, 'rb') as f:\n map_1 = pickle.load(f)\n with open(pickle_2, 'rb') as f:\n map_2 = pickle.load(f)\n map_1.update(map_2)\n\n with open(output_path, 'wb') as f:\n pickle.dump(map_1, f, pickle.HIGHEST_PROTOCOL)\n\n return", "def match_grains(self, micro2, mis_tol=1, use_grain_ids=None, verbose=False):\n if not self.get_lattice().get_symmetry() == micro2.get_lattice().get_symmetry():\n raise ValueError('warning, microstructure should have the same symmetry, got: {} and {}'.\n format(self.get_lattice().get_symmetry(), micro2.get_lattice().get_symmetry()))\n candidates = []\n matched = []\n unmatched = [] # grain that were not matched within the given tolerance\n # restrict the grain ids to match if needed\n if use_grain_ids:\n grains_to_match = [self.get_grain(gid) for gid in use_grain_ids]\n else:\n grains_to_match = self.grains\n # look at each grain\n for i, g1 in enumerate(grains_to_match):\n cands_for_g1 = []\n best_mis = mis_tol\n best_match = -1\n for g2 in micro2.grains:\n # compute disorientation\n mis, _, _ = g1.orientation.disorientation(g2.orientation, crystal_structure=self.get_lattice().get_symmetry())\n misd = np.degrees(mis)\n if misd < mis_tol:\n if verbose:\n print('grain %3d -- candidate: %3d, misorientation: %.2f deg' % (g1.id, g2.id, misd))\n # add this grain to the list of candidates\n cands_for_g1.append(g2.id)\n if misd < best_mis:\n best_mis = misd\n best_match = g2.id\n # add our best match or mark this grain as unmatched\n if best_match > 0:\n matched.append([g1.id, best_match])\n else:\n unmatched.append(g1.id)\n candidates.append(cands_for_g1)\n if verbose:\n print('done with 
matching')\n print('%d/%d grains were matched ' % (len(matched), len(grains_to_match)))\n return matched, candidates, unmatched", "def merge_regions(\n out_path: str, sample1_id: int, regions1_file: File, sample2_id: int, regions2_file: File\n) -> File:\n\n def iter_points(regions):\n for start, end, depth in regions:\n yield (start, \"start\", depth)\n yield (end, \"end\", -depth)\n\n def iter_regions(points):\n first_point = next(points, None)\n if first_point is None:\n return\n start, _, depth = first_point\n\n for pos, kind, delta in points:\n if pos > start:\n yield (start, pos, depth)\n start = pos\n depth += delta\n\n regions1 = read_regions(regions1_file)\n regions2 = read_regions(regions2_file)\n points1 = iter_points(regions1)\n points2 = iter_points(regions2)\n points = iter_merge(points1, points2)\n regions = iter_regions(points)\n\n region_path = f\"{out_path}/regions/{sample1_id}_{sample2_id}.regions\"\n return write_regions(region_path, regions)", "def append(\n motion1, \n motion2, \n pivot_offset1=0,\n pivot_offset2=0,\n pivot_alignment=False,\n blend_length=0, \n blend_method=\"overlapping\"\n ):\n assert isinstance(motion1, (motion_class.Motion, vel_class.MotionWithVelocity))\n assert isinstance(motion2, (motion_class.Motion, vel_class.MotionWithVelocity))\n assert motion1.fps == motion2.fps\n assert motion1.skel.num_joints() == motion2.skel.num_joints()\n assert motion1.num_frames() > 0 or motion2.num_frames() > 0\n\n if motion1.num_frames() == 0:\n combined_motion = copy.deepcopy(motion2)\n combined_motion.name = f\"{motion1.name}+{motion2.name}\"\n return combined_motion\n\n if motion2.num_frames() == 0:\n combined_motion = copy.deepcopy(motion1)\n combined_motion.name = f\"{motion1.name}+{motion2.name}\"\n return combined_motion\n\n frame_source = motion1.time_to_frame(motion1.length() - pivot_offset1)\n frame_target = motion2.time_to_frame(pivot_offset2)\n\n # Translate and rotate motion2 to location of frame_source\n pose1 = motion1.get_pose_by_frame(frame_source)\n pose2 = motion2.get_pose_by_frame(frame_target)\n\n R1, p1 = conversions.T2Rp(pose1.get_root_transform())\n R2, p2 = conversions.T2Rp(pose2.get_root_transform())\n\n v_up_env = motion1.skel.v_up_env\n\n # Remove the translation of the pivot of the motion2\n # so that rotation works correctly\n dp = -(p2 - math.projectionOnVector(p2, v_up_env))\n motion2 = translate(motion2, dp)\n\n # Translation to be applied\n dp = p1 - math.projectionOnVector(p1, v_up_env)\n\n # Rotation to be applied\n Q1 = conversions.R2Q(R1)\n Q2 = conversions.R2Q(R2)\n _, theta = quaternion.Q_closest(Q1, Q2, v_up_env)\n dR = conversions.A2R(v_up_env * theta)\n\n motion2 = transform(\n motion2, conversions.Rp2T(dR, dp), pivot=0, local=False)\n\n combined_motion = copy.deepcopy(motion1)\n combined_motion.name = f\"{motion1.name}+{motion2.name}\"\n del combined_motion.poses[frame_source + 1 :]\n\n t_start = motion1.length()-blend_length\n t_processed = 0.0\n dt = 1 / motion2.fps\n for i in range(frame_target, motion2.num_frames()):\n t_processed += dt\n if blend_length > 0.0:\n alpha = min(1.0, t_processed / float(blend_length))\n else:\n alpha = 1.0\n # Do blending for a while (blend_length)\n if alpha < 1.0:\n if blend_method == \"propagation\":\n pose_out = blend(\n motion1.get_pose_by_time(t_start),\n motion2.get_pose_by_frame(i),\n alpha)\n elif blend_method == \"overlapping\":\n pose_out = blend(\n motion1.get_pose_by_time(t_start+t_processed),\n motion2.get_pose_by_frame(i),\n alpha)\n elif blend_method == 
\"inertialization\":\n # TODO\n raise NotImplementedError\n else:\n raise NotImplementedError\n else:\n pose_out = copy.deepcopy(motion2.get_pose_by_frame(i))\n combined_motion.add_one_frame(pose_out.data)\n\n # Recompute velocities if exists\n if isinstance(combined_motion, vel_class.MotionWithVelocity):\n combined_motion.compute_velocities()\n\n return combined_motion", "def merged_mask(basins, ds, lon_name='lon', lat_name='lat', merge_dict = None, verbose=False):\n mask = basins.mask(ds,lon_name=lon_name, lat_name=lat_name)\n\n def find_mask_index(name):\n target_value = [ri for ri in range(len(basins.regions)) if basins.regions[ri].name == name]\n if len(target_value) > 1:\n warnings.warn(f\"Found more than one matching region for {name}\")\n return target_value[0]\n elif len(target_value) == 1:\n return target_value[0]\n else:\n return None\n \n \n if merge_dict is None:\n merge_dict = _default_merge_dict()\n \n dict_keys = list(merge_dict.keys())\n number_dict = {k:None for k in dict_keys}\n merged_basins = []\n for ocean, small_basins in merge_dict.items():\n# ocean_idx = find_mask_index(ocean)\n try:\n ocean_idx = basins.map_keys(ocean)\n except(KeyError):\n #The ocean key is new and cant be found in the previous keys (e.g. for Atlantic full or maritime continent)\n ocean_idx = mask.max().data + 1\n number_dict[ocean] = ocean_idx\n if small_basins:\n for sb in small_basins:\n sb_idx = basins.map_keys(sb)\n #set the index of each small basin to the ocean value\n mask = mask.where(mask!=sb_idx, ocean_idx)\n merged_basins.append(sb)\n \n if verbose:\n remaining_basins = [str(basins.regions[ri].name) for ri in range(len(basins.regions)) if (basins.regions[ri].name not in merged_basins) and (basins.regions[ri].name not in list(merge_dict.keys()))]\n print(remaining_basins)\n\n #reset the mask indicies to the order of the passed dictionary keys\n mask_reordered = xr.ones_like(mask.copy()) * np.nan\n for new_idx, k in enumerate(dict_keys):\n old_idx = number_dict[k]\n mask_reordered = mask_reordered.where(mask!=old_idx, new_idx)\n\n return mask_reordered", "def intersect(self, other):\n # work only on the lowest level\n # TODO: Allow this to be done for regions with different depths.\n if not (self.maxdepth == other.maxdepth):\n raise AssertionError(\"Regions must have the same maxdepth\")\n self._demote_all()\n opd = set(other.get_demoted())\n self.pixeldict[self.maxdepth].intersection_update(opd)\n self._renorm()\n return", "def merge(self, other):\n # print \"merging\"\n self.min_DM = min(self.min_DM, other.min_DM)\n self.max_DM = max(self.max_DM, other.max_DM)\n self.min_time = min(self.min_time, other.min_time)\n self.max_time = max(self.max_time, other.max_time)\n self.merged = True\n # other is no loner an independent cluster\n other.true_cluster = False", "def test_union(self):\n a = bounding_box.BoundingBox(\n bounding_box.Point(5, 10), bounding_box.Size(5, 20)\n )\n b = bounding_box.BoundingBox(\n bounding_box.Point(7, 5), bounding_box.Size(15, 20)\n )\n self.assertEqual(bounding_box.calculate_union(a, b), 355)", "def merge(self, overlap=0.5, max_iter=2, k_nearest=10): \n def top_k(centers, target, k_nearest):\n distances = cdist(centers, asarray(target).reshape(1,2)).flatten()\n return argsort(distances)[0:k_nearest]\n\n def merge_once(initial):\n centers = asarray(initial.center)\n nearest = [top_k(centers, source.center, k_nearest) for source in initial]\n\n regions = []\n skip = []\n keep = []\n\n for ia, source in enumerate(initial):\n for ib in nearest[ia]:\n other = 
initial[ib]\n if not ia == ib and source.overlap(other) > overlap:\n source = source.merge(other)\n if ib not in keep:\n skip.append(ib)\n\n regions.append(source)\n keep.append(ia)\n\n return many([region for ir, region in enumerate(regions) if ir not in skip])\n\n regions = merge_once(self.regions)\n\n for _ in range(max_iter-1):\n regions = merge_once(regions)\n\n return ExtractionModel(regions)", "def test_merge_annotation(self):\n seg1_fn = os.path.join(TEST_DATA_PATH, u'sff', u'v0.8', u'annotated_emd_1014.json')\n seg2_fn = os.path.join(TEST_DATA_PATH, u'sff', u'v0.8', u'emd_1014.json')\n seg1 = adapter.SFFSegmentation.from_file(seg1_fn)\n seg2 = adapter.SFFSegmentation.from_file(seg2_fn)\n # perform the notes merge\n seg1.merge_annotation(seg2)\n self.assertEqual(seg1.name, seg2.name)\n self.assertEqual(seg1.software_list, seg2.software_list)\n self.assertEqual(seg1.details, seg2.details)\n self.assertEqual(seg1.global_external_references, seg2.global_external_references)\n for segment in seg1.segment_list:\n other_segment = seg2.segment_list.get_by_id(segment.id)\n self.assertEqual(segment.biological_annotation.external_references,\n other_segment.biological_annotation.external_references)\n self.assertNotEqual(segment.colour, other_segment.colour)\n # test that we can merge colours too!\n seg1.merge_annotation(seg2, include_colour=True)\n for segment in seg1.segment_list:\n other_segment = seg2.segment_list.get_by_id(segment.id)\n self.assertEqual(segment.biological_annotation.external_references,\n other_segment.biological_annotation.external_references)\n self.assertEqual(segment.colour, other_segment.colour)", "def merge(self, spectra):\n i0 = spectra.ispecmin-self.ispecmin\n iispec = slice(i0, i0+spectra.nspec)\n \n i1 = spectra.ifluxmin-self.ifluxmin\n iiflux = slice(i1, i1+spectra.nflux)\n \n self.flux[iispec, iiflux] = spectra.flux\n self.ivar[iispec, iiflux] = spectra.ivar\n\n if self.xflux is not None:\n self.xflux[iispec, iiflux] = spectra.xflux\n\n for i in range(spectra.nspec):\n j = (spectra.ispecmin - self.ispecmin) + i\n if self.R[j] is None:\n full_range = self.ifluxmin, self.ifluxmin + self.nflux\n self.R[j] = ResolutionMatrix.blank(bandwidth=15, \\\n nflux=self.nflux, full_range=full_range)\n \n self.R[j].merge(spectra.R[i])\n \n if self.pix is None:\n if spectra.pix is not None:\n self.pix = spectra.pix.copy()\n self.xmin = spectra.xmin\n self.ymin = spectra.ymin\n else:\n pass\n elif spectra.pix is not None:\n xmin = min(self.xmin, spectra.xmin)\n ymin = min(self.ymin, spectra.ymin)\n xmax = max(self.xmax, spectra.xmax)\n ymax = max(self.ymax, spectra.ymax)\n nxtot = xmax-xmin+1\n nytot = ymax-ymin+1\n pix = N.zeros((nytot, nxtot))\n for spec in self, spectra:\n ny, nx = spec.pix.shape\n x0 = spec.xmin - xmin\n y0 = spec.ymin - ymin\n #- Add, not replace pixels\n pix[y0:y0+ny, x0:x0+nx] += spec.pix\n \n self.pix = pix\n self.xmin = xmin\n self.ymin = ymin", "def overlap(self, other):\n\n assert isinstance(other, Region), 'other must be a subclass of Region.'\n\n if self.shapely.intersects(other.shapely) is False:\n return False\n\n return OverlapRegion(self.shapely.intersection(other.shapely))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the average of the elements of array a.
def mean(a): return sum(a) / float(len(a))
[ "def avg_vec(a):\n avg = numpy.average(a, axis=0)\n arr = numpy.asarray(avg)\n return arr", "def average(data):\n return 1.0*sum(data)/len(data)", "def average(x):\r\n assert len(x) > 0\r\n return float(sum(x)) / len(x)", "def func(arr):\n return arr.mean()", "def average(data):\n counts = len(data)\n total = sum(data)\n return total / counts", "def get_avg(input_list):\n return sum(input_list)/len(input_list)", "def mean(self, numbers, index):\n return float(sum([item[index] for item in numbers]) /\n max(len(numbers), 1))", "def _moving_average(a: np.ndarray, n: int) -> np.ndarray:\n b = np.copy(a)\n b = np.insert(b, 0, np.full(n, a[0]))\n s = np.cumsum(b)\n res = (s[n:] - s[:-n]) / n\n return res", "def find_average(input_list):\r\n return sum(input_list)/len(input_list)", "def __calculate_average(self, list):\n return reduce(lambda x, y: x + y, list) / len(list)", "def get_avg_score(score_array):\n return np.mean(score_array)", "def mean(data):\n try:\n running_sum = 0\n for x in data:\n running_sum += x\n return running_sum/len(data)\n except ZeroDivisionError:\n raise TypeError(\"needs at least one argument\")", "def averages(*args):\r\n \r\n numbers = []\r\n\r\n for i in args:\r\n numbers.append(i)\r\n\r\n media = mean(numbers)\r\n\r\n return media", "def weighted_average(var: ArrayLike, eta_filter: np.ndarray) -> ArrayLike:\n if len(var) == 1:\n return var\n\n var_array = np.tile(var, (np.shape(eta_filter)[0], 1))\n average = np.average(var_array, weights=eta_filter, axis=1)\n return average", "def mean(iterable):\r\n return math.fsum(iterable) / max(len(iterable), 1)", "def avg_Ao(self):\n ...", "def mad(a: np.ndarray) -> np.ndarray:\n\n a = np.asarray(a)\n out = np.mean(np.absolute(a - np.mean(a)))\n return out", "def mean(X):\n return(float(sum(X))/ len(X))", "def average_function(a, b):\n c = (a**2 + b**2) / 2\n return c" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the standard deviation of the elements of array a.
def stddev(a): return math.sqrt(var(a))
[ "def stdev(self, nums):\n mean = float(sum(nums)) / len(nums)\n return math.sqrt(sum((n - mean) ** 2 for n in nums) / float(len(nums)))", "def stddev(data):\n counts = len(data)\n ave = average(data)\n total = sum(data * data)\n return (total - ave**2) / counts", "def standard_deviation(data, sample=True):\n return math.sqrt(variance(data, sample))", "def stddev(values):\n total = 0.0\n totalSquared = 0.0\n n = 0\n\n for value in values:\n total += value\n totalSquared += value * value\n n += 1\n\n # Need at least two values.\n if n < 2:\n raise InsufficientData\n\n n = float(n)\n return sqrt((totalSquared - total * total / n) / (n - 1))", "def StandardDeviation(values):\n if type(values) is not list:\n raise str(\"Invalid input exception \")\n\n n = len(values)\n mean = 0\n for v in values:\n mean += v\n\n if n == 0:\n return 0\n\n mean = 0\n\n sum = 0\n for v in values:\n sum += math.pow(v - mean, 2)\n\n SD = math.sqrt(sum / n)\n return SD", "def stdeviation(data):\n return statistics.stdev(data)", "def stdDev(X):\n\tmean = float(sum(X))/len(X)\n\ttot = 0.0\n\tfor x in X:\n\t\ttot += (x - mean)**2\n\treturn (tot/len(X))**0.5\t# square root of mean squared difference", "def stddev(lst):\n mn = mean(lst)\n variance = sum([(e-mn)**2 for e in lst])\n return sqrt(variance)", "def calculate_stdev(self):\n\n return np.array(self.data).std()", "def stddev(self):\n m = self.mean()\n n = np.sum(self.counts)\n dx = self.axis().center - m \n return np.sqrt(np.sum(self.counts*dx**2)/n)", "def std(*list: float) -> float:\n # Get mean of list elements.\n mean_value = mean(*list)\n\n # Get number of list elements.\n size = len(list)\n # Get sum of squared deviations.\n total = 0\n for e in list:\n diff = e - mean_value\n total += diff * diff\n\n # Get standard deviation of list of elements.\n return exp.radical(total/size, 2)", "def __non_zero_std__(inArray):\n # type: (numpy.ndarray) -> float\n return inArray[numpy.nonzero(inArray)[0]].std()", "def std(signal):\n return np.std(signal)", "def semideviation(r):\n is_negative = r < 0 # predicate/mask to select negative entries\n return r[is_negative].std(ddof=0)", "def std(self, *, axis=1):\n try:\n stds = np.nanstd(self.data,axis=axis).squeeze()\n if stds.size == 1:\n return np.asscalar(stds)\n return stds\n except IndexError:\n raise IndexError(\"Empty RegularlySampledAnalogSignalArray cannot calculate standard deviation\")", "def standardize(arr):\n return (arr - np.mean(arr)) / np.std(arr)", "def semideviation(r):\n return r[r<0].std(ddof=0)", "def stdev(inList):\r\n avg = Average(inList)\r\n i = 0\r\n sumSqrDiff = 0.0\r\n for x in inList:\r\n if x == None: continue ##Added 050709\r\n x = float(x)\r\n i += 1\r\n sumSqrDiff += math.pow(avg - x,2)\r\n return math.sqrt(sumSqrDiff/i)", "def sd(x, na_rm=False):\n # ==========================================================================\n # TODO: implement na_rm\n # TODO: consider adding option to calculate biased sample sd, dividing by n\n # TODO: consider adding trim as an argument and implementing it\n return tstd(x, limits=None, inclusive=(True, True))", "def stdrange(a, z=5):\n return (np.mean(a) - z * np.std(a), np.mean(a) + z * np.std(a))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the median of the elements of array a.
def median(a): b = list(a) # Make a copy of a. b.sort() length = len(b) if length % 2 == 1: return b[length//2] else: return float(b[length//2 - 1] + b[length//2]) / 2.0
[ "def findMedianSortedArray(self, A):\n S = len(A)\n m = S//2\n if S % 2 == 1:\n return A[m]\n return 0.5*(A[m] + A[m-1])", "def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):\n return _statistics._median(a, axis, out, overwrite_input, keepdims)", "def findMedianSortedArrays(self, a, b):\n n = len(a) + len(b)\n if n % 2 == 0:\n # If the total length is even, take the average of the two medians.\n return (self._findKth(a, 0, b, 0, n // 2) +\n self._findKth(a, 0, b, 0, n // 2 + 1)) / 2.0\n else:\n return self._findKth(a, 0, b, 0, n // 2 + 1)", "def median(numbers):\n assert len(numbers) > 3, 'Length of Numbers Must be more than 3'\n assert isinstance(numbers, collections.Iterable), 'Is Iterable'\n numbers = sorted(numbers)\n center = len(numbers) // 2\n if len(numbers) % 2 == 0:\n return sum(numbers[center - 1:center + 1]) // 2\n else:\n return numbers[center]", "def median(iterable):\n\n items = sorted(iterable)\n if len(items) == 0:\n raise ValueError(\"mean() arg is an empty sequence\")\n median_index = (len(items) -1 ) // 2\n if len(items) %2 != 0:\n return items[median_index]\n return (items[median_index] + items[median_index + 1]) / 2.0", "def median(data):\n try:\n data = sorted(list(data))\n n = len(data)\n if n%2==0:\n return (data[(n//2)-1]+data[n//2])/2\n else:\n return data[n//2]\n except IndexError:\n raise TypeError(\"needs at least one argument\")", "def median(self) -> DataValue:\n return median(self.iterable)", "def median(self):\r\n\t\treturn np.median(self.dataset)", "def median_filter(data: np.ndarray, kernel_size: int = 3) -> np.ndarray:\n return scipy.signal.medfilt(data, kernel_size)", "def median(self):\n\n values = sorted(self._values)\n if len(values) % 2:\n return values[int((len(values) - 1) / 2)]\n midway = int(self.length / 2)\n return (values[midway - 1] + values[midway]) / 2", "def simple_median(a: float, b: float, c: float) -> float:\n if a > b and b > c:\n return b\n elif c > b and b > a:\n return b\n elif c > a and a > b:\n return a\n elif b > a and a > c:\n return a\n elif a > c and c > b:\n return c\n elif b > c and c > a:\n return c", "def move_median(a, window, min_count=None, axis=-1): # real signature unknown; restored from __doc__\n pass", "def findMedianSortedArrays(self, nums1, nums2):\n pass", "def median(data_set):\n data_set_length = len(data_set)\n sorted_data_set = sorted(data_set)\n midpoint = data_set_length // 2\n if data_set_length % 2:\n return sorted_data_set[midpoint]\n else:\n hi = sorted_data_set[midpoint]\n lo = sorted_data_set[midpoint - 1]\n return (hi + lo) / 2", "def median(self):\n return np.median(self.counts)", "def return_median(lst):\n\n return lst[int(math.ceil(len(lst)/2))]", "def choose_median_index(a_list):\n len_list = len(a_list)\n # first, last, and middle entries\n p1 = a_list[0]\n p2 = a_list[ceil((len_list / 2) - 1)]\n p3 = a_list[len_list - 1]\n # if middle entry is between first and last\n if (p1 <= p2 <= p3) or (p3 <= p2 <= p1):\n median_index = ceil((len_list / 2) - 1)\n # else if first entry is between middle and last\n elif (p2 <= p1 <= p3) or (p3 <= p1 <= p2):\n median_index = 0\n # else last entry is between first and middle\n else:\n median_index = len_list - 1\n return median_index", "def median(lst):\n tmp_lst = lst.copy()\n quick_sort(tmp_lst)\n return tmp_lst[(len(lst)-1) // 2]", "def median(images):\n return np.median(np.dstack(images), axis=2).astype(np.uint8)", "def median(self):\n\n # generate combined list of all pixels in CCD called 'arr'\n larr = []\n for win in 
self._data:\n larr.append(win.flatten())\n arr = np.concatenate(larr)\n\n return np.median(arr)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot the elements of array a as points.
def plotPoints(a): n = len(a) stddraw.setXscale(-1, n) stddraw.setPenRadius(1.0 / (3.0 * n)) for i in range(n): stddraw.point(i, a[i])
[ "def plot_point(points: Union[Point, List[Point]], display=True):\n if isinstance(points, Point):\n points = [points]\n\n for point in points:\n plt.plot(point.x, point.y, \"-o\")\n plt.axes().set_aspect(\"equal\", \"datalim\")\n if display:\n plt.show()", "def graph_scatter(arr, color='green', title='Scatter Plot of Given Points', x_label='X', y_label='Y'):\n plt.style.use('ggplot')\n\n x, y = [], []\n for point in arr:\n x.append(point[0])\n y.append(point[1])\n\n fig = plt.figure()\n axes = fig.add_axes([0,0,1,1])\n axes.scatter(x, y, color=color)\n axes.set_xlabel(x_label)\n axes.set_ylabel(y_label)\n axes.set_title(title)\n plt.show()", "def plot_points(coordAmp):\n xValues = coordAmp.loc[:, 'xPos 1'::8]\n yValues = coordAmp.loc[:, 'yPos 1'::8]\n plt.scatter(xValues, yValues)\n plt.show()", "def plot_res(n):\n x = np.array([i for i in range(n)])\n y = gen_array_2(n)\n plt.plot(x, y, 'o')\n plt.show()", "def plot(arr, vary = None, labels = None):\n from matplotlib import pyplot as plt\n from numpy import where\n plt.ion()\n if vary is None:\n vary = arr[:,0]*0+1\n vary[0] = vary[-1] = 0\n x = arr[:,0]\n y = arr[:,1]\n x_data = x[where(vary != 0)]\n y_data = y[where(vary != 0)]\n x_center = x[where(vary == 0)]\n y_center = y[where(vary == 0)]\n if labels is None:\n n = list(range(len(x-2)))\n fig, ax = plt.subplots()\n ax.plot(x, y, '-o')\n for i, txt in enumerate(n):\n ax.annotate(txt, (x[i], y[i]))\n plt.show()", "def plot_tri_array(array,ax):\n\tfor tri in array:\n\t\ttri_new = tri + [tri[0]]\n\t\ttri_x = [corner[0] for corner in tri_new]\n\t\ttri_y = [corner[1] for corner in tri_new]\n\t\tax.plot(tri_x,tri_y)", "def plot(self, ax, a, qs=None, **kwargs):\n if qs is None:\n qs = np.linspace(0, 2, 100)\n ts = np.array([self([q], a) for q in qs])\n ax.plot(qs, ts, **kwargs)", "def plotLines(a):\n n = len(a)\n stddraw.setXscale(-1, n)\n stddraw.setPenRadius(0.0)\n for i in range(1, n):\n stddraw.line(i-1, a[i-1], i, a[i])", "def converts_spikes_into_plot(spike_numpy_array, x, y, step):\n\n for i,j in zip(spike_numpy_array,range(len(spike_numpy_array))):\n if i==1: # Is there a spike in the index j?\n x.append(step)\n y.append(j)\n\n return (x,y)", "def plot_points(points: list, path: list) -> None:\n x = [x[0] for x in points]\n y = [y[1] for y in points]\n\n plot.plot(x, y, '.', color='black') \n\n path_points = []\n for v in path:\n path_points.append(points[v])\n plot.text(points[v][0], points[v][1], str(v + 1), fontsize=8)\n\n data = np.array(path_points)\n plot.plot(data[:, 0], data[:, 1])\n\n plot.show()", "def _plot_points(self, tags: bool = False):\n for point in self._data:\n if tags:\n tag_dot(self._canvas, *self._get_px(point), tag=str(point), radius=DOT_RADIUS_PX)\n else:\n dot(self._canvas, *self._get_px(point), radius=DOT_RADIUS_PX)", "def plot_from_array(self, arr):\n fig, axes = plt.subplots(1, 1, figsize=(self.width, self.height))\n plotopts = {\n 'aspect': self.aspect,\n 'cmap': cm.cmap_d[self.cmap],\n 'vmin': self.cmin,\n 'vmax': self.cmax,\n }\n plt.imshow(arr.reshape(self.plt_shape), **plotopts)\n if self.grid:\n plt.grid()\n if self.axes:\n plt.colorbar()\n plt.title(self.title)\n extrakwargs = {}\n else:\n extent = axes.get_window_extent().transformed(\n fig.dpi_scale_trans.inverted()\n )\n if not self.grid:\n plt.axis('off')\n extrakwargs = {\n 'bbox_inches': extent,\n 'pad_inches': 0,\n }\n outfile = BytesIO()\n fig.savefig(\n outfile,\n format='png',\n transparent=True,\n dpi=self.dpi,\n **extrakwargs\n )\n outfile.seek(0)\n return self.Result(image=outfile)", 
"def plot(x,y,best_threshold_index_array):\r\n\r\n #plotting the pyplot\r\n mp.plot(x, y, '.-')\r\n\r\n\r\n #title of the plot\r\n mp.title(\"Missclassification as a function of threshold\")\r\n\r\n\r\n #assigning the x label , y label\r\n mp.xlabel(\"Threshold in MPH\")\r\n mp.ylabel(\"Missclassification \")\r\n\r\n #plotting points with lowest misclassification rate\r\n for i in range(len(best_threshold_index_array)):\r\n index = best_threshold_index_array[i]\r\n mp.scatter(x[index], y[index], s=90, marker=\"H\", facecolors='none', edgecolors='r')\r\n\r\n mp.show()", "def visual_graph(self, point_list):\n x = []\n y = []\n # # print len(e)\n for index in point_list:\n for i in index:\n x.append(i[0])\n y.append(i[2])\n plt.scatter(x, y, label=\"stars\", color=\"green\",\n marker=\"*\", s=50)\n plt.plot(x, y)\n plt.legend()\n plt.show()", "def visualize(X, Y):\n plt.plot(X, Y, \"bx\")\n plt.show()", "def drawKeypoints_Array(img0,pts):\n f,ax2 = plt.subplots(1, 1)\n cols = pts[:,0]\n rows = pts[:,1]\n ax2.imshow(cv2.cvtColor(img0, cv2.COLOR_BGR2RGB))\n ax2.scatter(cols, rows)\n plt.show()", "def plot_relation(a):\n\n\n rent = dataset[:,27]\n\n index = list(i for i in range(0, len(rent)) if rent[i] == '\\\\N' or pd.isnull(rent[i]))\n index2 = list(i for i in range(0, len(a)) if a[i] == '\\\\N' or pd.isnull(a[i]))\n\n a = np.delete(a, index + index2).astype(float)\n rent = np.delete(rent, index + index2).astype(float)\n\n plt.scatter(a, rent)\n plt.show()", "def plot(self, Q):\n self.new_figure()\n plt.plot(*self.split_array(Q))", "def plot_real_P_dot_data():\n N = REAL_P_DOT_NUMBERS\n P = np.arange(0.0e-14, 4.0e-14, 0.2e-14)\n plt.scatter(P, N, label=\"N(P_dot)_real\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot the elements of array a as line endpoints.
def plotLines(a): n = len(a) stddraw.setXscale(-1, n) stddraw.setPenRadius(0.0) for i in range(1, n): stddraw.line(i-1, a[i-1], i, a[i])
[ "def plot_tri_array(array,ax):\n\tfor tri in array:\n\t\ttri_new = tri + [tri[0]]\n\t\ttri_x = [corner[0] for corner in tri_new]\n\t\ttri_y = [corner[1] for corner in tri_new]\n\t\tax.plot(tri_x,tri_y)", "def plotPoints(a):\n n = len(a)\n stddraw.setXscale(-1, n)\n stddraw.setPenRadius(1.0 / (3.0 * n))\n for i in range(n):\n stddraw.point(i, a[i])", "def plot_res(n):\n x = np.array([i for i in range(n)])\n y = gen_array_2(n)\n plt.plot(x, y, 'o')\n plt.show()", "def plot(arr, vary = None, labels = None):\n from matplotlib import pyplot as plt\n from numpy import where\n plt.ion()\n if vary is None:\n vary = arr[:,0]*0+1\n vary[0] = vary[-1] = 0\n x = arr[:,0]\n y = arr[:,1]\n x_data = x[where(vary != 0)]\n y_data = y[where(vary != 0)]\n x_center = x[where(vary == 0)]\n y_center = y[where(vary == 0)]\n if labels is None:\n n = list(range(len(x-2)))\n fig, ax = plt.subplots()\n ax.plot(x, y, '-o')\n for i, txt in enumerate(n):\n ax.annotate(txt, (x[i], y[i]))\n plt.show()", "def plot_segments(arry, daybreak, colour):\n\n arry = np.array(arry).transpose()\n arry = arry[:,arry[0].argsort()]\n days, vals = arry\n days -= firstday\n breaks = np.where(days[1:] - days[:-1] > daybreak)[0] + 1\n sp = nd = 0\n for b in breaks:\n nd = b\n plt.plot(days[sp:nd], vals[sp:nd], color=colour)\n sp = nd\n if nd < vals.size:\n plt.plot(days[nd:], vals[nd:], color=colour)", "def DrawLineToArray(ary, xa, ya, xb, yb, color, alpha=255):\n\n xa, xb = xa + 0.5, xb + 0.5\n ya, yb = ya + 0.5, yb + 0.5\n if abs(xb - xa) > abs(yb - ya):\n if xa > xb:\n xa, xb = xb, xa\n ya, yb = yb, ya\n x_ary = np.arange(xa, xb).astype(np.int)\n y_ary = np.linspace(ya, yb, num=x_ary.size).astype(np.int)\n else:\n if ya > yb:\n xa, xb = xb, xa\n ya, yb = yb, ya\n y_ary = np.arange(ya, yb).astype(np.int)\n x_ary = np.linspace(xa, xb, num=y_ary.size).astype(np.int)\n\n dest = ary[x_ary, y_ary]\n r = (color[0] * alpha + ((dest >> 16) & 0xff) * (255 - alpha)) / 256\n g = (color[1] * alpha + ((dest >> 8) & 0xff) * (255 - alpha)) / 256\n b = (color[2] * alpha + ((dest >> 0) & 0xff) * (255 - alpha)) / 256\n ary[x_ary, y_ary] = (r << 16) | (g << 8) | (b << 0)", "def simple_lines(self):\n counts_flat = numpy.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n counts_jiggle = numpy.array([0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0])\n counts_ascending = numpy.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n counts_descending = numpy.array([8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0])\n lines = []\n lines.append(PlotLine(counts_flat, rank=1))\n lines.append(PlotLine(counts_jiggle, rank=1))\n lines.append(PlotLine(counts_ascending, rank=1))\n lines.append(PlotLine(counts_descending, rank=1))\n return lines", "def plot(self, ax, a, qs=None, **kwargs):\n if qs is None:\n qs = np.linspace(0, 2, 100)\n ts = np.array([self([q], a) for q in qs])\n ax.plot(qs, ts, **kwargs)", "def line_array(array):\n x_upper = []\n x_lower = []\n for i in range(1, len(array) - 1):\n start_a, start_b = draw_line(i, array)\n end_a, end_b = stop_line(i, array)\n if start_a >= 7 and start_b >= 5:\n x_upper.append(i)\n if end_a >= 5 and end_b >= 7:\n x_lower.append(i)\n return x_upper, x_lower", "def plotE(ax, endpoints, modules):\n xs = endpoints[:,0]\n ys = endpoints[:,1]\n zs = endpoints[:,2]\n\n for i, mod in zip(range(1, len(modules)+1), modules):\n # plot the lines between the points\n ax.plot3D([xs[i-1], xs[i]],\n [ys[i-1],ys[i]],\n [zs[i-1], zs[i]], linewidth=5, label=mod)", "def plotMulticolorLine(ax,xs,ys,zs,cmap='viridis',n_interp=50,**kwargs):\n\n xs = 
linearInterpolate(xs,n_interp)\n ys = linearInterpolate(ys,n_interp)\n zs = linearInterpolate(zs,n_interp)\n\n n_interp = max(3,n_interp)\n points = np.array([xs, ys]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n\n ## duplicate the final entry because otherwise it's ignored and you don't\n ## make it to zs[-1] ever, no matter how many n_interp you have\n segments = np.append(segments,segments[-1:],axis=0)\n zs = np.append(zs,zs[-1])\n\n lc = LineCollection(segments, cmap=cmap,norm=plt.Normalize(0, 1),**kwargs)\n lc.set_array(zs)\n lc.set_linewidth(3)\n ax.add_collection(lc)", "def plotline(ax, z1, z2, *args, **kwargs):\n return ax.plot((z1.real, z2.real), (z1.imag, z2.imag), *args, **kwargs)", "def visualize(X, Y):\n plt.plot(X, Y, \"bx\")\n plt.show()", "def draw_line(p1, p2, *varargin, **others):\n \n plt.plot([p1[0], p2[0]], [p1[1], p2[1]], *varargin, **others)", "def plot_linestrings(linestrings):\n for linestring in linestrings:\n for line in zip(linestring.coords, linestring.coords[1:]):\n plt.plot(*zip(*line)) # I know...", "def show_lineplot(self, *args, **kwargs):\n raise NotImplementedError()", "def plot_lines(p1, p2, cond, color, line_width=.4):\n p1_in = p1[cond]\n p2_in = p2[cond]\n for i in range(p1_in.shape[0]):\n x_s = [p1_in[i, 0], p2_in[i, 0]]\n y_s = [p1_in[i, 1], p2_in[i, 1]]\n plt.plot(x_s, y_s, color+\"-\", lw=line_width)", "def plot(self, signals):\n fig = plt.figure()\n for i in range(len(signals)):\n ax = fig.add_subplot(len(signals), 1, i + 1)\n ax.plot(signals[i])\n plt.show()", "def plot_line(self, x, y, data, fig=None):\n\n if fig is None:\n fig = plt.figure(self._figure_name)\n fig.clf()\n fig.add_subplot(111)\n ax = fig.gca()\n ax.set_title(\n \"{0:s} line={1:d}\".format(\n self.lineplot_pars[\"title\"][0], int(\n y + 1)))\n ax.set_xlabel(self.lineplot_pars[\"xlabel\"][0])\n ax.set_ylabel(self.lineplot_pars[\"ylabel\"][0])\n\n if not self.lineplot_pars[\"xmax\"][0]:\n xmax = len(data[y, :])\n else:\n xmax = self.lineplot_pars[\"xmax\"][0]\n ax.set_xlim(self.lineplot_pars[\"xmin\"][0], xmax)\n\n if self.lineplot_pars[\"logx\"][0]:\n ax.set_xscale(\"log\")\n if self.lineplot_pars[\"logy\"][0]:\n ax.set_yscale(\"log\")\n\n if self.lineplot_pars[\"pointmode\"][0]:\n ax.plot(data[y, :], self.lineplot_pars[\"marker\"][0])\n else:\n ax.plot(data[y, :])\n\n plt.draw()\n plt.show(block=False)\n time.sleep(self.sleep_time)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot the elements of array a as bars.
def plotBars(a): n = len(a) stddraw.setXscale(-1, n) for i in range(n): stddraw.filledRectangle(i-0.25, 0.0, 0.5, a[i])
[ "def bar_chart(values, xticks, title, xlabel, ylabel, barColor='b', barAlpha=1):\n if len(values)!=len(xticks):\n print 'Error: debe haber tantos grupos como etiquetas de barras.'\n return\n #Draw graph\n ind=np.arange(len(values))\n width=0.5\n p1 = plt.bar(ind, values, width, color=barColor, alpha=barAlpha)\n #Draw labels and titles\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.title(title)\n plt.xticks(ind+width/2., xticks ,rotation='horizontal')\n #Show\n plt.show()\n return p1", "def graph_frequency_histogram(arr, bar_color='green', title='Graph of Frequencies'):\n plt.style.use('ggplot')\n\n dictionary = bf.frequency(arr)\n keys = dictionary.keys()\n values = [dictionary[i] for i in keys]\n x_pos = [i for i in range(len(keys))]\n\n plt.bar(x_pos, values, color=bar_color)\n plt.title(title)\n plt.xticks(x_pos, keys)\n plt.show()", "def bar_graph(self, dataset):\n return self._plot_standard('bar', dataset)", "def bar_graph():\n N = 3\n before_aug = (75.56, 85.29, 82.86)\n after_aug = (82.22, 87.18, 91.89)\n\n ind = np.arange(N)\n width = 0.35\n plt.bar(ind, before_aug, width, label='Before Augmentation')\n plt.bar(ind + width, after_aug, width, label='After Augmentation')\n\n plt.ylabel('Percentage')\n plt.title('Comparison between Evaluation Criteria before nd after augmentation')\n\n plt.xticks(ind + width / 2, ('Accuracy', 'Precision', 'Recall'))\n plt.legend(loc='best')\n plt.show()", "def stacked_plot(x, groups, x_bins, ax, color_array=None, use_percent=False):\n\n x_radius = (x_bins[1] - x_bins[0]) / 2.0\n\n # ind = np.arange(len(x_bins))\n\n group_vals = np.unique(groups)\n num_groups = len(group_vals)\n\n p_array = np.zeros((num_groups, len(x_bins)))\n\n for x_idx, x_cent in enumerate(x_bins):\n\n x_mask = (x >= x_cent - x_radius) \\\n & (x < x_cent + x_radius)\n\n # y_bin = y[x_mask]\n g_bin = groups[x_mask]\n num_points = len(g_bin)\n\n if num_points > 0:\n for g_idx, g in enumerate(group_vals):\n if use_percent:\n p_array[g_idx, x_idx] = np.sum(\n g_bin == g) / float(num_points)\n else:\n p_array[g_idx, x_idx] = np.sum(\n g_bin == g) \n p = list()\n\n if not color_array is None:\n p.append(\n ax.bar(x_bins,\n p_array[0, :],\n width=x_radius * 1.5,\n color=color_array[0]))\n else:\n p.append(ax.bar(x_bins, p_array[0, :], width=x_radius * 1.5))\n\n for g_idx in range(1, num_groups):\n if not color_array is None:\n p.append(\n ax.bar(x_bins,\n p_array[g_idx, :],\n bottom=p_array[g_idx - 1, :],\n width=x_radius * 1.5,\n color=color_array[g_idx]))\n else:\n p.append(\n ax.bar(x_bins,\n p_array[g_idx, :],\n bottom=p_array[g_idx - 1, :],\n width=x_radius * 1.5))\n #ax.set_xticks(ind,x_bins)\n ax.legend(group_vals)\n # return group_vals,p_array", "def bar(results, h='pdf', **kwargs): # pragma: no cover\n if 'edgecolor' not in kwargs:\n kwargs['edgecolor'] = 'k'\n fig = plt.bar(x=results.bin_centers, height=getattr(results, h),\n width=results.bin_widths, **kwargs)\n xlab = [attr for attr in results.__dir__() if not attr.startswith('_')][0]\n plt.xlabel(xlab)\n plt.ylabel(h)\n return fig", "def make_bars(ax, data, x, ys, width, colors, alpha=.5):\n\n shift=0\n for y, c in zip(ys, colors):\n ax.bar(\n data[x] + shift,\n data[y],\n width = width,\n color=c,\n label=y,\n alpha=alpha\n )\n \n shift += width", "def example_bar_chart():\n xlabel = \"X Label (units)\"\n ylabel = \"Y Label (units)\"\n titlex = \"Example Graph Title\"\n doLegend = True\n\n bar_data = [(1,2,3),(2,3,4),(3,4,5),(4,5,6)]\n labels = [\"label 1\", \"label 2\", \"label 3\", \"label 4\"]\n 
plot_multibar_chart(bar_data, labels, titlex, xlabel, ylabel)", "def show_barplot(self, *args, **kwargs):\n raise NotImplementedError()", "def bar_chart(datalist, **options):\n dl = len(datalist)\n #if dl > 1:\n # print \"WARNING, currently only 1 data set allowed\"\n # datalist = datalist[0]\n if dl == 3:\n datalist = datalist+[0]\n #bardata = []\n #cnt = 1\n #for pnts in datalist:\n #ind = [i+cnt/dl for i in range(len(pnts))]\n #bardata.append([ind, pnts, xrange, yrange])\n #cnt += 1\n\n g = Graphics()\n g._set_extra_kwds(Graphics._extract_kwds_for_show(options))\n #TODO: improve below for multiple data sets!\n #cnt = 1\n #for ind, pnts, xrange, yrange in bardata:\n #options={'rgbcolor':hue(cnt/dl),'width':0.5/dl}\n # g._bar_chart(ind, pnts, xrange, yrange, options=options)\n # cnt += 1\n #else:\n ind = list(range(len(datalist)))\n g.add_primitive(BarChart(ind, datalist, options=options))\n if options['legend_label']:\n g.legend(True)\n return g", "def stacked_percent_plot(x, groups, x_bins, ax, color_array=None):\n x_radius = (x_bins[1] - x_bins[0]) / 2.0\n # ind = np.arange(len(x_bins))\n\n group_vals = np.unique(groups)\n num_groups = len(group_vals)\n\n p_array = np.zeros((num_groups, len(x_bins)))\n\n for x_idx, x_cent in enumerate(x_bins):\n\n x_mask = (x >= x_cent - x_radius) \\\n & (x < x_cent + x_radius)\n\n # y_bin = y[x_mask]\n g_bin = groups[x_mask]\n num_points = len(g_bin)\n\n if num_points > 0:\n for g_idx, g in enumerate(group_vals):\n p_array[g_idx, x_idx] = np.sum(g_bin == g) / float(num_points)\n p = list()\n\n if not color_array is None:\n p.append(\n ax.bar(x_bins,\n p_array[0, :],\n width=x_radius * 1.5,\n color=color_array[0]))\n else:\n p.append(ax.bar(x_bins, p_array[0, :], width=x_radius * 1.5))\n for g_idx in range(1, num_groups):\n if not color_array is None:\n p.append(\n ax.bar(x_bins,\n p_array[g_idx, :],\n bottom=p_array[g_idx - 1, :],\n width=x_radius * 1.5,\n color=color_array[g_idx]))\n else:\n p.append(\n ax.bar(x_bins,\n p_array[g_idx, :],\n bottom=p_array[g_idx - 1, :],\n width=x_radius * 1.5))\n #ax.set_xticks(ind,x_bins)\n ax.legend(group_vals, bbox_to_anchor=(1.0, 1.0))\n # return group_vals,p_array", "def barchart(kmer_vectors: dict) -> None:\n for genome_name in kmer_vectors:\n cur_v = kmer_vectors[genome_name]\n dataset = list()\n for item in cur_v:\n dataset.append(cur_v.get(item))\n a = np.array(dataset)\n base_labels = [item for item in cur_v]\n y_pos = np.arange(len(base_labels))\n\n plt.bar(y_pos, a, align='center', alpha=0.5)\n plt.xticks(y_pos, base_labels)\n plt.ylabel(\"normalised frequency\")\n plt.xlabel(\"k-mer\")\n plt.title(genome_name)\n\n out_dir = \"/home/oisin/programs/cs318/318assignment/analysis/kmer_analysis/histograms\"\n plt.savefig(f\"{out_dir}/{genome_name}_hist.png\")\n plt.close()", "def propBarPlot(data, # list of 1D boolean data vectors\n names=None, # names of data vectors\n title=' ', # title of plot\n ylbl='Proportion', # y-label\\\n plot=True):\n data = np.array(data)\n N = len(data)\n Lx = [len(col) for col in data]\n\n if names is None:\n names = [str(i + 1) for i in range(N)]\n if N >= 3:\n cols = cl.scales[str(N)]['qual']['Set3']\n else:\n cols = cl.scales[str(3)]['qual']['Set3'][0:N]\n jitter = .03\n\n means = [np.mean(col) for col in data]\n std = [(means[n]*(1-means[n])/Lx[n])**.5 for n in range(N)]\n\n traces = []\n bars = [go.Bar(\n x=list(range(N)),\n y=means,\n marker=dict(\n color=cols),\n text=['N = %d' % (l) for l in Lx],\n name='BAR',\n error_y=dict(\n type='data',\n array=std,\n visible=True\n 
),\n showlegend=False\n )]\n traces += bars\n\n xaxis = go.XAxis(\n # title=\"\",\n showgrid=True,\n showline=True,\n ticks=\"\",\n showticklabels=True,\n linewidth=2,\n ticktext=names,\n tickvals=list(range(N)),\n tickfont=dict(size=18)\n )\n\n layout = go.Layout(\n title=title,\n xaxis=xaxis,\n yaxis={'title': ylbl},\n bargap=.5,\n hovermode='closest',\n showlegend=False,\n )\n\n fig = go.Figure(data=traces, layout=layout)\n\n return plotOut(fig, plot)", "def create_barplot(ax, relevances, y_pred, x_lim=1.1, title='', x_label='', concept_names=None, **kwargs):\n # Example data\n y_pred = y_pred.item()\n if len(relevances.squeeze().size()) == 2:\n relevances = relevances[:, y_pred]\n relevances = relevances.squeeze()\n if concept_names is None:\n concept_names = ['C. {}'.format(i + 1) for i in range(len(relevances))]\n else:\n concept_names = concept_names.copy()\n concept_names.reverse()\n y_pos = np.arange(len(concept_names))\n colors = ['b' if r > 0 else 'r' for r in relevances]\n colors.reverse()\n\n ax.barh(y_pos, np.flip(relevances.detach().cpu().numpy()), align='center', color=colors)\n ax.set_yticks(y_pos)\n ax.set_yticklabels(concept_names)\n ax.set_xlim(-x_lim, x_lim)\n ax.set_xlabel(x_label, fontsize=18)\n ax.set_title(title, fontsize=18)", "def plotBarChart(topic, pos_counts, neg_counts):\n outlets = (\"BBC\", \"DailyMail\", \"Guardian\", \"Metro\", \"Mirror\", \"Reuters\", \"Independent\", \"Sun\")\n\n fig, ax = plt.subplots()\n y_pos = np.arange(len(outlets))\n bar_width = 0.20\n opacity = 0.8\n\n rects1 = plt.barh(y_pos, neg_counts, bar_width,\n alpha=opacity,\n color='#ff4542',\n label='Negative')\n\n rects3 = plt.barh(y_pos + bar_width, pos_counts, bar_width,\n alpha=opacity,\n color='#5eff7c',\n label='Positive')\n\n plt.yticks(y_pos, outlets)\n plt.xlabel('News Sentiment Percentage')\n plt.title('News Sentiment Analysis: '+str(topic))\n plt.legend()\n\n plt.tight_layout()\n plt.show()", "def eda_plots(data, xlabels, x_axis_label, title): \n #Getting colour scheme\n \n clrs = []\n\n for x in data:\n if x == max(data):\n clrs.append('green')\n elif x == min(data):\n clrs.append('red')\n else:\n clrs.append('grey')\n \n # Plotting\n plt.figure(figsize=(15,5))\n sns.barplot(x=xlabels, y=data, palette=clrs)\n plt.xlabel(x_axis_label,fontsize = 16)\n plt.ylabel('Total Sales', fontsize = 16)\n plt.title(title,fontsize = 16)", "def svg_generate_bar_chart(self, *elements):\n bar_height = 100\n label_height = 80\n length_factor = 4\n overall_height = bar_height + label_height\n overall_width = 100 * length_factor\n\n svg = self.svg_create_element(overall_height, overall_width)\n\n sum_all_elements = sum([length for unused, length in elements])\n\n current_pos = 0\n bar_group = document.createElementNS(self.svg_namespace, 'g')\n bar_group.setAttribute('id', 'bar_group')\n bar_group.setAttribute('stroke', 'black')\n bar_group.setAttribute('stroke-width', 2)\n\n nr_processed_elements = 0\n for title, length in elements:\n rect_len = int(100 * length / sum_all_elements) * length_factor\n\n if not rect_len:\n continue\n\n colour = self.svg_colours[nr_processed_elements % len(self.svg_colours)]\n\n rect = self.svg_create_rect(current_pos, 0, rect_len, bar_height, colour)\n bar_group.appendChild(rect)\n\n label_group = document.createElementNS(self.svg_namespace, 'g')\n label_group.setAttribute('id', title)\n colour_rect = self.svg_create_rect(0, 0, 20, 20, colour)\n colour_rect.setAttribute('stroke', 'black')\n colour_rect.setAttribute('stroke-width', 2)\n\n text = 
document.createElementNS(self.svg_namespace, 'text')\n text.setAttribute('x', '30')\n text.setAttribute('y', '18')\n text.textContent = title\n\n label_group.appendChild(colour_rect)\n label_group.appendChild(text)\n\n # TODO replace hardcoded values\n x = 5 + 125 * (nr_processed_elements // 2)\n y = bar_height + 10 + (nr_processed_elements % 2) * 40\n label_group.setAttribute('transform', 'translate({}, {})'.format(x, y))\n\n bar_group.appendChild(label_group)\n\n current_pos += rect_len\n nr_processed_elements += 1\n\n svg.appendChild(bar_group)\n\n return svg", "def plot_barplots(adata, plotsDir, bname, cluster_key='sampleID', cluster_bname='sampleID', analysis_stage_num='01', analysis_stage='raw', color_palette=\"vega_20\"):\r\n # Convert palette into colormap\r\n clcmap = ListedColormap(sc.pl.palettes.zeileis_28)\r\n # Get the DF of tissue and clusters\r\n clusterBatchDF = adata.obs[['batch','{0}'.format(cluster_key)]].copy()\r\n # Replace batch number with batch names\r\n clusterBatchDF.replace({'batch': sampleIdDict}, inplace=True)\r\n # Remove index for groupby\r\n clusterBatchDF.reset_index(drop=True, inplace=True)\r\n # Get the number of cells for each cluster in every tissue\r\n ncellsClusterBatchDF = clusterBatchDF.groupby(['batch','{0}'.format(cluster_key)]).size()\r\n # Get the percent of cells for each cluster in every tissue \r\n pcellsClusterBatchDF = pd.crosstab(index=clusterBatchDF['batch'], columns=clusterBatchDF['{0}'.format(cluster_key)], values=clusterBatchDF['{0}'.format(cluster_key)], aggfunc='count', normalize='index')\r\n # Plot the barplots\r\n fig = plt.figure(figsize=(32,24)); fig.suptitle(\"Cells for each {0} in each tissue\".format(cluster_key))\r\n # plot numbers of cells\r\n ax = fig.add_subplot(2, 2, 1); ncellsClusterBatchDF.unstack().plot(kind='barh', stacked=True, colormap=clcmap, ax=ax, legend=None, title=\"Number of cells\")\r\n # plot percent of cells\r\n ax = fig.add_subplot(2, 2, 2); pcellsClusterBatchDF.plot(kind='barh',stacked=True, colormap=clcmap, ax=ax, title=\"% of cells\")\r\n # Shrink current axis by 20%\r\n box = ax.get_position()\r\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\r\n # Put a legend to the right of the current axis\r\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), title='{0}'.format(cluster_key), title_fontsize=12)\r\n\r\n # Get the number of cells for each tissue in every cluster\r\n nbatchPerClusterIdDF = clusterBatchDF.groupby(['{0}'.format(cluster_key),'batch']).size()\r\n # Get the percent of cells for each tissue in every cluster \r\n pbatchPerClusterIdDF = pd.crosstab(index=clusterBatchDF['{0}'.format(cluster_key)], columns=clusterBatchDF['batch'], values=clusterBatchDF['batch'], aggfunc='count', normalize='index')\r\n # Plot the barplots\r\n ax = fig.add_subplot(2, 2, 3); nbatchPerClusterIdDF.unstack().plot(kind='barh', stacked=True, colormap=clcmap, ax=ax, legend=None, title=\"number of cells for each tissue in every cluster\")\r\n # plot percent of cells\r\n ax = fig.add_subplot(2, 2, 4); pbatchPerClusterIdDF.plot(kind='barh',stacked=True, colormap=clcmap, ax=ax, title=\"% of cells for each tissue in every cluster\")\r\n # Shrink current axis by 20%\r\n box = ax.get_position()\r\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\r\n # Put a legend to the right of the current axis\r\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), title='{0}'.format(cluster_key), title_fontsize=12)\r\n\r\n # Save plots in a 2x2 grid style\r\n plt.tight_layout() # For non-overlaping 
subplots\r\n plt.savefig(\"{0}/{4}_{3}_{1}_{2}_tissueID_cluster_barplot.png\".format(plotsDir, bname, cluster_bname, analysis_stage, analysis_stage_num) , bbox_inches='tight', dpi=175); plt.close('all')", "def _plot_bar_graph(self, mpl_bargraph):\n today = str(dt.today())\n list_of_dates = self.expenses_tracker.get_week_dates(today)\n list_of_expenses = self._get_expenses_by_dates(list_of_dates)\n frequency_list = utility.get_frequency_history_list(list_of_expenses)\n mpl_bargraph.axes.bar(\n list(frequency_list.keys()), list(frequency_list.values()), align=\"center\"\n )\n mpl_bargraph.axes.set_title(\"Purchases A Day\")\n mpl_bargraph.axes.set_ylabel(\"Number Of Items\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy file from source to destination if needed (skip if source is destination).
def copy(source, destination): source = os.path.abspath(source) destination = os.path.abspath(destination) if source != destination: shutil.copyfile(source, destination)
[ "def copy_file(cls, path, source_dir, destination_dir):\n if not (source_dir / path).exists():\n return\n shutil.copyfile(str(source_dir / path), str(destination_dir / path))", "def copy_file(self, source, destination, cmd=True):\n self._check_path(source)\n self._check_path(destination)\n\n if cmd:\n self.run_shell_cmd(command=[\"copy\", source, destination], cmd=True)\n else:\n self.run_shell_cmd(command=\"copy \"+source+\" \"+destination,\n cmd=False)", "def copy(source, destination):\n\ttry:\n\t\tshutil.copyfile(translatePath(source), translatePath(destination))\n\t\treturn True\n\texcept:\n\t\treturn False", "def _copy(self, src, dest):\n shutil.copyfile(src, dest)\n try:\n shutil.copystat(src, dest)\n except OSError:\n self.log.debug(\"copystat on %s failed\", dest, exc_info=True)", "def safe_copyfile(src, dest):\n if os.path.isdir(dest):\n dest = os.path.join(dest, os.path.basename(src))\n if os.path.lexists(dest):\n if not global_options['overwrite']:\n raise ValueError(\"was asked to copy %s but destination already exists: %s\"\n % (src, dest))\n else:\n # to make sure we can write there ... still fail if it is entire directory ;)\n os.unlink(dest)\n shutil.copyfile(src, dest)", "def copy(source, destination):\n try:\n shutil.copyfile(safe_path(source), safe_path(destination))\n except shutil.Error:\n return False\n else:\n return True", "def copyfile(self, src, dst):\n self.logger.debug('Copying file %s to %s.', src, dst)\n shutil.copy2(src, dst)", "def copy_file(source, dest, mode=0644, mtime=None, bufsize=8096,\n size=-1, uid=-1, gid=-1, read_method='read',\n write_method='write'):\n fsource_obj = None\n fdest_obj = None\n source_flike = False\n dest_flike = False\n if hasattr(source, read_method):\n fsource_obj = source\n source_flike = True\n if hasattr(dest, write_method):\n fdest_obj = dest\n dest_flike = True\n if source_flike and dest_flike:\n _copy_file(fsource_obj, fdest_obj, bufsize,\n size, read_method, write_method)\n return\n if not source_flike and not os.path.isfile(source):\n raise ValueError(\"source \\\"%s\\\" is no file\" % source)\n if not dest_flike:\n if os.path.exists(dest) and not os.path.isfile(dest):\n raise ValueError(\"dest \\\"%s\\\" exists but is no file\" % dest)\n dirname = os.path.dirname(dest)\n if os.path.exists(dest) and not os.access(dest, os.W_OK):\n raise ValueError(\"invalid dest filename: %s is not writable\" %\n dest)\n elif not os.path.exists(dirname):\n # or should we check that it is really a dir?\n raise ValueError(\"invalid dest filename: dir %s does not exist\" %\n dirname)\n elif not os.access(dirname, os.W_OK):\n raise ValueError(\"invalid dest filename: dir %s is not writable\" %\n dirname)\n tmp_filename = ''\n try:\n if not source_flike:\n fsource_obj = open(source, 'rb')\n if not dest_flike:\n dirname = os.path.dirname(dest)\n filename = os.path.basename(dest)\n fdest_obj = NamedTemporaryFile(dir=dirname, prefix=filename,\n delete=False)\n tmp_filename = fdest_obj.name\n _copy_file(fsource_obj, fdest_obj, bufsize,\n size, read_method, write_method)\n if tmp_filename:\n fdest_obj.flush()\n os.rename(tmp_filename, dest)\n finally:\n if not source_flike and fsource_obj is not None:\n fsource_obj.close()\n if not dest_flike and fdest_obj is not None:\n fdest_obj.close()\n if tmp_filename and os.path.isfile(tmp_filename):\n os.unlink(tmp_filename)\n if not dest_flike:\n euid = os.geteuid()\n egid = os.getegid()\n if uid != euid or euid != 0:\n # (probably) insufficient permissions\n uid = -1\n if gid != egid or egid != 0:\n # 
(probably) insufficient permissions\n gid = -1\n os.chown(dest, uid, gid)\n if mtime is not None:\n os.utime(dest, (-1, mtime))\n os.chmod(dest, mode)", "def copy_file(original_path, destination_path):\n shutil.copyfile(original_path, destination_path)", "def copy_file(server, source, target):\n with setup_server_connection(server) as connection:\n Transfer(connection).put(local=source, remote=target)", "def copy_file(self, src, dst):\n dst_existed = False\n pre_hash = None\n if not self.changed:\n if os.path.isfile(dst):\n dst_existed = True\n pre_hash = self.get_hash(dst)\n copyfile(src, dst)\n if not self.changed:\n if dst_existed:\n post_hash = self.get_hash(dst)\n self.changed = pre_hash == post_hash\n else:\n if os.path.isfile(dst):\n self.changed = True", "def cp(src_filename, dst_filename):\n src_is_remote = is_remote_path(src_filename)\n dst_is_remote = is_remote_path(dst_filename)\n if src_is_remote == dst_is_remote:\n return auto(copy_file, src_filename, dst_filename)\n filesize = auto(get_filesize, src_filename)\n if src_is_remote:\n with open(dst_filename, 'wb') as dst_file:\n return remote(send_file_to_host, src_filename, dst_file, filesize,\n xfer_func=recv_file_from_remote)\n with open(src_filename, 'rb') as src_file:\n return remote(recv_file_from_host, src_file, dst_filename, filesize,\n xfer_func=send_file_to_remote)", "def copy_file (\n source_path,\n target_path,\n allow_undo=True,\n no_confirm=False,\n rename_on_collision=True,\n silent=False,\n hWnd=None\n):\n return _file_operation (\n shellcon.FO_COPY,\n source_path,\n target_path,\n allow_undo,\n no_confirm,\n rename_on_collision,\n silent,\n hWnd\n )", "def Copy_Or_Link_A_File (Source_Path, Target_Path):\n global Target_Count\n if options.Copy_Files:\n Prepare_Target_Location (Target_Path)\n Run_Or_Log ('shutil.copy2 (\"' + Source_Path + '\", \"' + Target_Path + '\")')\n Target_Count = Target_Count + 1\n else:\n Link_A_File (Source_Path, Target_Path)", "def _copy_file(fsource_obj, fdest_obj, bufsize, size,\n read_method, write_method):\n read = getattr(fsource_obj, read_method)\n write = getattr(fdest_obj, write_method)\n for data in iter_read(fsource_obj, bufsize=bufsize, size=size,\n read_method=read_method):\n write(data)", "def copy_file(self, origin_path: str, dest_path: str):\n shutil.copy2(origin_path, dest_path)", "def copy_with_progress ( source , destination ) :\n assert os.path.exists ( source ) and os.path.isfile ( source ), \\\n \"copy_with_progress: ``source'' %s does nto exist!\" % source\n \n total = os.stat ( source ) . 
st_size\n BLOCK = 512 * 1024\n\n destination = os.path.abspath ( destination ) \n destination = os.path.normpath ( destination )\n destination = os.path.realpath ( destination )\n if os.path.exists ( destination ) and os.path.isdir ( destination ) :\n destination = os.path.join ( destination , os.path.basename ( source ) )\n \n from ostap.utils.progress_bar import ProgressBar \n read = 0\n \n with ProgressBar ( total , silent = total < 3 * BLOCK ) as pbar : \n with open ( source , 'rb' ) as fin :\n with open ( destination , 'wb' ) as fout :\n while True :\n \n block = fin.read ( BLOCK )\n fout.write ( block )\n \n read += len ( block )\n pbar.update_amount ( read )\n if not block : break ## BREAK\n \n assert os.path.exists ( destination ) and \\\n os.path.isfile ( destination ) and \\\n os.stat ( destination ).st_size == total, \\\n \"Invalid ``destination'' %s \" % destination\n \n return os.path.realpath ( destination )", "def copy_file(self, source_path, remote_path):\n sftp = self.get_sftp()\n sftp.put(source_path, remote_path)", "def backup(src, dest, verbose = False):\n\timport os\n\timport shutil\n\timport sys\n\tdef size_if_newer(source, target):\n\t\t\"\"\"If newer returns size else returns False\"\"\"\n\t\tsrc_stat = os.stat(source)\n\t\ttry:\n\t\t\ttarget_ts = os.stat(target).st_mtime\n\t\texcept:\n\t\t\ttarget_ts = 0\n\t\treturn src_stat.st_size if (src_stat.st_mtime-target_ts > 1) else False\n\n\tdef sync_file(source, target, verbose):\n\t\tsize = size_if_newer(source, target)\n\t\tif size:\n\t\t\ttransfer_file(source, target, verbose)\n\t\t\treturn 1\n\t\telif verbose:\n\t\t\tprint \"Skipping %s\" % (source, )\n\t\t\treturn 0\n\n\tdef transfer_file(source, target, verbose):\n\t\ttry:\n\t\t\tshutil.copy2(source, target)\n\t\t\tif verbose:\n\t\t\t\tprint \"Copied %s to %s\" % (source, target)\n\t\texcept:\n\t\t\tos.makedirs(os.path.dirname(target))\n\t\t\ttransfer_file(source, target, verbose)\n\t# Now do it...\n\tfor path, _, files in os.walk(src):\n\t\tdestpath = dest + path[len(src):]\n\t\tif terminated:\n\t\t\tbreak\n\t\tif len(files)>0:\n\t\t\tprint \"Syncing %d files in %s to %s.\" % ( len(files), path, destpath)\n\t\t\tskipped, copied = 0, 0\n\t\t\tfor file in files:\n\t\t\t\tr = sync_file(path + \"/\" + file, destpath + \"/\" + file, verbose)\n\t\t\t\tif r==1:\n\t\t\t\t\tcopied=copied+1\n\t\t\t\telse:\n\t\t\t\t\tskipped=skipped+1\n\t\t\t\tif terminated:\n\t\t\t\t\tbreak\n\t\t\tprint \"%d copied, %d skipped\" % (copied, skipped)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Searches for the project by name
def filtrar(self, nombre): return Proyecto.query.filter(Proyecto.nombre == nombre).first_or_404()
[ "def objeto_por_nome(self, nome):\n socios_registrados = [socio.nome for socio in self.__socios]\n funcionarios_registrados = [funcionario.nome for funcionario in self.__funcionarios]\n if nome in socios_registrados:\n objeto = self.__socios[socios_registrados.index(nome)]\n else:\n objeto = self.__funcionarios[funcionarios_registrados.index(nome)]\n return objeto", "def usersDeProyecto(self, nombre):\n proyecto = self.filtrar(nombre)\n return proyecto.users", "def get_name(self):\r\n return self.__nombre", "def get_catalogo_item_nombre(codigo_catalogo, nombre):\n return CatalogoItem.objects.filter(catalogo__codigo=codigo_catalogo, nombre=nombre).first()", "def set_nombre_producto(self, producto):\n return sistema.set_producto_actual(producto)", "def nombre(self, nombre):\n\n return dict(recibos=model.Recibo.query.filter(\n model.Recibo.cliente.like(\"%\" + nombre + \"%\")))", "def project_name():\n try:\n return os.environ[constants.ENV_VARIABLES.HOPSWORKS_PROJECT_NAME_ENV_VAR]\n except:\n pass\n\n hops_user = project_user()\n hops_user_split = hops_user.split(\"__\") # project users have username project__user\n project = hops_user_split[0]\n return project", "def communeName():", "def nombre_completo(self):\n return f\"{self.nombre} {self.apellido}\"", "def project(self, login: str, slug: str) -> Project:\n pass", "def setNombreComercial(self, nombre_comercial):\r\n self.nombre_comercial = nombre_comercial", "def getNombreComercial(self):\r\n return self.nombre_comercial", "def get_nombre_corto(self):\n return 'Nivel {0:d}'.format(self.numero)", "def get_project_company(args):\n\n # TODO: generate list of valid companies in prompts based on COMPANIES.\n company = get_value(args, 'company', 'Enter company [pc|eg|ag]: ').lower()\n while company not in COMPANIES:\n print(\"A company must be selected. 
Please choose between pc, eg, and ag.\")\n company = get_value(args, 'company', 'Enter company [pc|eg|ag]: ').lower()\n continue\n return company", "def __getNombre(self):\n\t\treturn self.__nombre", "def get_name(self):\n\t\treturn self._env.get_project_name()", "def createProyecto():\n # crea un proyecto\n p = Proyecto(nombre=\"proyecto1\", descripcion=\"sistema 1\", presupuesto=10000)\n MgrProyecto().guardar(p)\n per = MgrPermiso().filtrarXModulo(\"ModuloGestion\")\n r = Rol(nombre=\"LiderDeProyecto\", descripcion=\"rol de lider\", ambito= p.nombre, permisos=per)\n MgrRol().guardar(r)\n MgrProyecto().asignarLider(proyecto = p , rol = r, nameLider = \"lory\")\n p = MgrProyecto().filtrar(\"proyecto1\")\n c = Comite(nombre=\"comite-proyecto1\", descripcion=\"comite de cambio\", cantMiembro=3, proyectoId=p.idProyecto)\n MgrComite().guardar(c)\n u = MgrProyecto().getUserLider(p.idProyecto)\n MgrComite().asignarUsuario(p,u)\n\n p = Proyecto(nombre=\"proyecto2\", descripcion=\"sistema 2\", presupuesto=20000)\n MgrProyecto().guardar(p)\n per = MgrPermiso().filtrarXModulo(\"ModuloGestion\")\n r = Rol(nombre=\"LiderDeProyecto\", descripcion=\"rol de lider\", ambito= p.nombre, permisos=per)\n MgrRol().guardar(r)\n MgrProyecto().asignarLider(proyecto = p , rol = r, nameLider = \"vavi\")\n p = MgrProyecto().filtrar(\"proyecto2\")\n c = Comite(nombre=\"comite-proyecto2\", descripcion=\"comite de cambio\", cantMiembro=3, proyectoId=p.idProyecto)\n MgrComite().guardar(c)\n u = MgrProyecto().getUserLider(p.idProyecto)\n MgrComite().asignarUsuario(p,u)\n\n p = Proyecto(nombre=\"proyecto3\", descripcion=\"sistema 3\", presupuesto=30000)\n MgrProyecto().guardar(p)\n per = MgrPermiso().filtrarXModulo(\"ModuloGestion\")\n r = Rol(nombre=\"LiderDeProyecto\", descripcion=\"rol de lider\", ambito= p.nombre, permisos=per)\n MgrRol().guardar(r)\n MgrProyecto().asignarLider(proyecto = p , rol = r, nameLider = \"guille\")\n p = MgrProyecto().filtrar(\"proyecto3\")\n c = Comite(nombre=\"comite-proyecto3\", descripcion=\"comite de cambio\", cantMiembro=3, proyectoId=p.idProyecto)\n MgrComite().guardar(c)\n u = MgrProyecto().getUserLider(p.idProyecto)\n MgrComite().asignarUsuario(p,u)\n \n p = Proyecto(nombre=\"proyecto4\", descripcion=\"sistema 4\", presupuesto=40000)\n MgrProyecto().guardar(p)\n per = MgrPermiso().filtrarXModulo(\"ModuloGestion\")\n r = Rol(nombre=\"LiderDeProyecto\", descripcion=\"rol de lider\", ambito= p.nombre, permisos=per)\n MgrRol().guardar(r)\n MgrProyecto().asignarLider(proyecto = p , rol = r, nameLider = \"stfy\")\n p = MgrProyecto().filtrar(\"proyecto3\")\n c = Comite(nombre=\"comite-proyecto3\", descripcion=\"comite de cambio\", cantMiembro=2, proyectoId=p.idProyecto)\n MgrComite().guardar(c)\n u = MgrProyecto().getUserLider(p.idProyecto)\n MgrComite().asignarUsuario(p,u)\n\n print \":cargo proyectos:\"", "def project_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"project_name\")", "def project_name(self, session):\n # FIXME: This is not adequate, see comments elsewhere about it.\n session = int(session) # To allow a string, e.g. '01' for 1\n return self.course.projects[session]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the Active projects
def listarActivo(self):
    return Proyecto.query.filter(Proyecto.estado == "Activo").all()
[ "def lista_procesos(self):\n for item in process_iter():\n try:\n data = item.as_dict(attrs=['pid', 'name', 'connections'])\n self.proclist.append(Proceso(pid=data['pid'],\n nombre=data['name'], imagen=item.exe(),\n conexiones=data['connections']))\n\n except AccessDenied:\n print('[!] Problema de acceso!! Ejecuta ese programa como root/Adminsitrador')\n sys.exit(-1)\n except PermissionError:\n print('[!] Problema de acceso!! Ejecuta ese programa como root/Adminsitrador')\n sys.exit(-1)\n return self.proclist", "def obtener_subactividades(idact):\n act = Actividad.objects.filter(actividad_padre=idact, is_active = True)\n lista = []\n for elem in act:\n lista.append(elem)\n return lista", "def colaboradores(idpiz):\n colaboradores= []\n act= Actividad.objects.filter(idpizactividad= idpiz, is_active = True).distinct('loginasignado')\n for elem in act:\n persona = elem.loginasignado\n usuario = User.objects.get(username= persona)\n if usuario.is_active == True:\n\t colaboradores.append(usuario)\n\n return colaboradores", "def activites(self):\n return self.__activites", "def listActivities(self):\n\t\tsys.stderr.write(\"\\nSTART: listActivities()\\n\")\n\t\tstatus=0\n\t\tactivities=[]\n\t\ttry:\n\t\t\tfor activity in self.process.activities:\n\t\t\t\tactivities.append(activity.activityTitle)\n\t\t\tsys.stderr.write(\"END: listActivities()\\n\")\n\t\t\treturn {'operation' : 'listActivities', 'status' : 1, 'json' : json.dumps( {\"result\": activities } ) }\n\t\texcept ImportError:\n\t\t\tsys.stderr.write(\"EXCEPTION: listActivities()\\n\")\n\t\t\texc_type, exc_value, exc_traceback = sys.exc_info()\n\t\t\ttraceback.print_tb(exc_traceback)", "def afficherProcessusActifs(self):\n\t\tprint(\"Affichage des processus actifs :\")\n\t\tfor r in self.a.ressources:\n\t\t\tallocatedProc = r.getAllocatedProcessus()\n\t\t\tif (allocatedProc):\n\t\t\t\tprint(str(allocatedProc))", "def list_active_sessions():\n return sorted(active_sessions.values(), key=attrgetter('start_time'))", "def obtener_hijos(actividad):\n hijos = Actividad.objects.filter(actividad_padre = actividad)\n lista = []\n for elem in hijos:\n lista.append(elem)\n\n return lista", "def ver_actividades_proyecto(request, flujo_id, proyecto_id):\n proyecto = get_object_or_404(Proyecto, id=proyecto_id)\n flujo = get_object_or_404(Flujo, id=flujo_id)\n user = User.objects.get(username=request.user.username)\n userRolProy = UsuarioRolProyecto.objects.filter(proyecto=proyecto_id)\n roles = UsuarioRolProyecto.objects.filter(usuario = user, proyecto = proyecto).only('rol')\n permisos_obj = []\n for i in roles:\n permisos_obj.extend(i.rol.permisos.all())\n permisos = []\n for i in permisos_obj:\n permisos.append(i.nombre)\n fluActProy = FlujoActividadProyecto.objects.filter(flujo = flujo_id, proyecto = proyecto_id).order_by('orden')\n actList = {}\n ultActividad = 0\n for rec in fluActProy:\n if not actList.has_key(rec.flujo.id):\n actList[rec.flujo.id] = {}\n if not actList[rec.flujo.id].has_key(int(rec.orden)):\n actList[rec.flujo.id][int(rec.orden)] = {}\n if not actList[rec.flujo.id][int(rec.orden)].has_key(rec.actividad.id):\n actList[rec.flujo.id][int(rec.orden)][rec.actividad.id] = []\n act = Actividad.objects.get(nombre = rec.actividad)\n actList[rec.flujo.id][int(rec.orden)][rec.actividad.id].append(act.nombre)\n actList[rec.flujo.id][int(rec.orden)][rec.actividad.id].append(act.descripcion)\n ultActividad = int(rec.orden)\n if actList:\n actDict = actList[int(flujo_id)]\n else:\n actDict = None\n lista = 
User.objects.all().order_by(\"id\")\n proyPend = False\n if proyecto.estado == 1:\n proyPend = True\n ctx = {'flujo':flujo,\n 'proyecto':proyecto,\n 'actividades':actDict,\n 'proyPend':proyPend,\n 'ultActividad':ultActividad,\n 'ver_flujo': 'ver flujo' in permisos,\n 'asignar_actividades_proyecto': 'asignar actividades proyecto' in permisos\n }\n return render_to_response('proyectos/admin_actividades_proyecto.html', ctx, context_instance=RequestContext(request))", "def usersDeProyecto(self, nombre):\n proyecto = self.filtrar(nombre)\n return proyecto.users", "def activitylist(self):\n return list(self.activities().values()) # insertion ordered (python >=3.6), triggers shallow copy", "def getUserActivities(self):\n return self.base.get(\"user_activities\", [])", "def list() -> None:\n config = load_config_file()\n running_instances = get_running_instances(config)\n print_formatted_instances(running_instances)\n logging.info('Done!')", "def get_start_activities():\n clean_expired_sessions()\n\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n\n logging.info(\"get_start_activities start session=\" + str(session) + \" process=\" + str(process))\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n dictio = lh.get_handler_for_process_and_session(process, session).get_start_activities()\n for entry in dictio:\n dictio[entry] = int(dictio[entry])\n list_act = sorted([(x, y) for x, y in dictio.items()], key=lambda x: x[1], reverse=True)\n logging.info(\n \"get_start_activities complete session=\" + str(session) + \" process=\" + str(process) + \" user=\" + str(\n user))\n\n return jsonify({\"startActivities\": list_act})\n\n return jsonify({\"startActivities\": []})", "async def autorole_list(self, ctx: Context):\n\n roles = self.role_cache[ctx.guild.id]\n if not roles:\n await ctx.send(\"No autoroles registered.\")\n return\n\n roles = (\n (\n ctx.guild.get_role(\n self.role_cache[ctx.guild.id].get_role_id(thz)\n ),\n thz,\n )\n for thz\n in self.role_cache[ctx.guild.id].values\n )\n\n pages = EmbedPaginator(ctx, f\"{ctx.guild.name} autoroles...\")\n for index, (role, thz) in enumerate(roles, start=1):\n pages.add_line(f'{index}. {role.mention} - {thz:,} THz')\n\n await pages.send_to()", "def find_activities(flow):\n\n activities = []\n for module_attribute in dir(flow):\n instance = getattr(flow, module_attribute)\n if isinstance(instance, Activity):\n activities.append(instance)\n return activities", "def ActiveLoad(self):\n proxies = []\n for act in self.proxies.values():\n if act.isChecked():\n proxies.append(act)\n return proxies", "def listaProyectos_a_iniciar(self,page=1):\n try:\n proy = DBSession.query(Proyecto).filter_by(iniciado=False).order_by(Proyecto.id_proyecto)\n usuario = DBSession.query(Usuario).filter_by(nombre_usuario=request.identity['repoze.who.userid']).first()\n proyectos=[]\n for p in proy:\n if usuario.proyectos.count(p)==1:\n proyectos.append(p)\n currentPage = paginate.Page(proyectos, page, items_per_page=10)\n except SQLAlchemyError:\n flash(_(\"No se pudo acceder a Proyectos! SQLAlchemyError...\"), 'error')\n redirect(\"/admin\")\n except (AttributeError, NameError):\n flash(_(\"No se pudo acceder a Proyectos! 
Hay Problemas con el servidor...\"), 'error')\n redirect(\"/admin\")\n\n return dict(proyectos=currentPage.items, page='listaProyectos_a_iniciar', currentPage=currentPage)", "def current_all_tasks(self):\n\n com = Competition.current_competition()\n return [task for task in self.tasks if task.competition_id == com.id]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the Finished projects
def listarFinalizado(self):
    return Proyecto.query.filter(Proyecto.estado == "Finalizado").all()
[ "def lista_procesos(self):\n for item in process_iter():\n try:\n data = item.as_dict(attrs=['pid', 'name', 'connections'])\n self.proclist.append(Proceso(pid=data['pid'],\n nombre=data['name'], imagen=item.exe(),\n conexiones=data['connections']))\n\n except AccessDenied:\n print('[!] Problema de acceso!! Ejecuta ese programa como root/Adminsitrador')\n sys.exit(-1)\n except PermissionError:\n print('[!] Problema de acceso!! Ejecuta ese programa como root/Adminsitrador')\n sys.exit(-1)\n return self.proclist", "def contar_proyectos():\n return len(Proyecto.objects.all())", "def listaProyectos_a_iniciar(self,page=1):\n try:\n proy = DBSession.query(Proyecto).filter_by(iniciado=False).order_by(Proyecto.id_proyecto)\n usuario = DBSession.query(Usuario).filter_by(nombre_usuario=request.identity['repoze.who.userid']).first()\n proyectos=[]\n for p in proy:\n if usuario.proyectos.count(p)==1:\n proyectos.append(p)\n currentPage = paginate.Page(proyectos, page, items_per_page=10)\n except SQLAlchemyError:\n flash(_(\"No se pudo acceder a Proyectos! SQLAlchemyError...\"), 'error')\n redirect(\"/admin\")\n except (AttributeError, NameError):\n flash(_(\"No se pudo acceder a Proyectos! Hay Problemas con el servidor...\"), 'error')\n redirect(\"/admin\")\n\n return dict(proyectos=currentPage.items, page='listaProyectos_a_iniciar', currentPage=currentPage)", "def listaUsuariosDeComite(self, proyecto):\n #Estilos de la tabla para cabeceras y datos\n thead = self.estiloHoja['Heading5']\n thead.alignment=TA_CENTER\n tbody = self.estiloHoja[\"BodyText\"]\n tbody.alignment=TA_LEFT\n cabecera = [Paragraph('Nick',thead),Paragraph('Nombre',thead),Paragraph('Apellido',thead),Paragraph('Email',thead),Paragraph('Estado',thead)]\n contenido = [cabecera]\n lista = MgrComite().miembrosComite(proyecto.nombre)\n tabla = Table(contenido)\n for u in lista:\n contenido.append([Paragraph(u.name,tbody), Paragraph(u.nombre,tbody), Paragraph(u.apellido,tbody), Paragraph(u.email,tbody), Paragraph(u.estado,tbody)])\n tabla = Table(contenido) \n tabla.setStyle(self.tablaStyle)\n return tabla", "def listarActivo(self):\n return Proyecto.query.filter(Proyecto.estado == \"Activo\").all()", "def listaRequisitos():\n sql = \"select * from requisitos\"\n requeriments = []\n rows = executeQuery(sql)\n for row in rows:\n requeriments.append(Requisito.desdeFila(row).serialize)\n return requeriments", "def _get_all_programs(self):\n programs = []\n\n for program in self.config['programs']:\n programs.append( program )\n return programs", "def listarPendiente(self):\n return Proyecto.query.filter(Proyecto.estado == \"Pendiente\").all()", "def clear_exits(self):\r\n for out_exit in [exi for exi in ObjectDB.objects.get_contents(self) if exi.db_destination]:\r\n out_exit.delete()\r\n for in_exit in ObjectDB.objects.filter(db_destination=self):\r\n in_exit.delete()", "def proyectoFinalizado(self, nombre):\n proyecto = self.filtrar(nombre)\n for fase in proyecto.listafases:\n if fase.estado != \"Finalizado\":\n return False\n return True", "async def listCommands(self) -> None:\n embedVar:Embed = Embed(title=\"~help para ter os comandos\", description=\"Para voce infernizar seus amigos\", colour=Colour.from_rgb(255, 102, 102))\n \n comandos:list = list(allComands.keys())\n for x in range(len(allComands.keys())):\n cmd = comandos[x]\n embedVar.add_field(name=cmd, value=allComands[cmd], inline=False)\n\n await self.message.channel.send(embed=embedVar)\n\n embedVar = comandos = cmd = None\n del embedVar, comandos, cmd", "def finalizer():\n 
pv_objs = []\n\n # Get PV form PVC instances and delete PVCs\n for instance in instances:\n if not instance.is_deleted:\n pv_objs.append(instance.backed_pv_obj)\n instance.delete()\n instance.ocp.wait_for_delete(instance.name)\n\n # Wait for PVs to delete\n helpers.wait_for_pv_delete(pv_objs)", "def usersDeProyecto(self, nombre):\n proyecto = self.filtrar(nombre)\n return proyecto.users", "def listaComissoes(lista_deputados):\n comissoes = []\n for deputado in lista_deputados:\n comissoes = comissoes + lista_deputados[deputado][\"comissoes\"]\n comissoes = list(set(comissoes))\n return comissoes", "def colaboradores(idpiz):\n colaboradores= []\n act= Actividad.objects.filter(idpizactividad= idpiz, is_active = True).distinct('loginasignado')\n for elem in act:\n persona = elem.loginasignado\n usuario = User.objects.get(username= persona)\n if usuario.is_active == True:\n\t colaboradores.append(usuario)\n\n return colaboradores", "def end(self):\n self._log.debug('Partida finalizada.')\n for p in self.players: p.disconnect()\n self.players = []", "def delete_all(self):\n local_session = self.conn()\n count = local_session.Profiler_Sessions.query.delete()\n local_session.commit()\n local_session.remove()\n return count", "def getAllMembresias(self):\n database = self.database\n sql = \"SELECT * FROM hermes.membresias;\"\n data = database.executeQuery(sql)\n lista = {}\n final = []\n if len(data) > 0:\n for x in data:\n lista = self.convertTuplaToList(x)\n final.append(lista)\n return final", "def all_platzi_workers():\n\tall_platzi_workers = [worker['name'] for worker in DATA if worker['organization']=='Platzi']\n\tprint('all_platzi_workers', end='\\n')\n\tprint(' ', end='\\n')\n\tprint(all_platzi_workers)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the Pending projects
def listarPendiente(self):
    return Proyecto.query.filter(Proyecto.estado == "Pendiente").all()
[ "def lista_procesos(self):\n for item in process_iter():\n try:\n data = item.as_dict(attrs=['pid', 'name', 'connections'])\n self.proclist.append(Proceso(pid=data['pid'],\n nombre=data['name'], imagen=item.exe(),\n conexiones=data['connections']))\n\n except AccessDenied:\n print('[!] Problema de acceso!! Ejecuta ese programa como root/Adminsitrador')\n sys.exit(-1)\n except PermissionError:\n print('[!] Problema de acceso!! Ejecuta ese programa como root/Adminsitrador')\n sys.exit(-1)\n return self.proclist", "def listaProyectos_a_iniciar(self,page=1):\n try:\n proy = DBSession.query(Proyecto).filter_by(iniciado=False).order_by(Proyecto.id_proyecto)\n usuario = DBSession.query(Usuario).filter_by(nombre_usuario=request.identity['repoze.who.userid']).first()\n proyectos=[]\n for p in proy:\n if usuario.proyectos.count(p)==1:\n proyectos.append(p)\n currentPage = paginate.Page(proyectos, page, items_per_page=10)\n except SQLAlchemyError:\n flash(_(\"No se pudo acceder a Proyectos! SQLAlchemyError...\"), 'error')\n redirect(\"/admin\")\n except (AttributeError, NameError):\n flash(_(\"No se pudo acceder a Proyectos! Hay Problemas con el servidor...\"), 'error')\n redirect(\"/admin\")\n\n return dict(proyectos=currentPage.items, page='listaProyectos_a_iniciar', currentPage=currentPage)", "def get_prousers(self, count: int = 5):\n resp = sess.get(api.pro_users % (self.symbol, count))\n dt = resp.ok and resp.json()\n self.prousers = [User(i) for i in dt]", "def colaboradores(idpiz):\n colaboradores= []\n act= Actividad.objects.filter(idpizactividad= idpiz, is_active = True).distinct('loginasignado')\n for elem in act:\n persona = elem.loginasignado\n usuario = User.objects.get(username= persona)\n if usuario.is_active == True:\n\t colaboradores.append(usuario)\n\n return colaboradores", "def ps_list(self) -> str:\n return self.run_device_command(\"ps-list\")[0]", "def listarActivo(self):\n return Proyecto.query.filter(Proyecto.estado == \"Activo\").all()", "def usersDeProyecto(self, nombre):\n proyecto = self.filtrar(nombre)\n return proyecto.users", "def get_communicationProfiles(self):\n # profiles\n lines = []\n # nlines\n # idx\n # line\n # cpos\n # profno\n res = []\n\n profiles = self._AT(\"+UMNOPROF=?\")\n lines = (profiles).split('\\n')\n nlines = len(lines)\n if not (nlines > 0):\n self._throw(YAPI.IO_ERROR, \"fail to retrieve profile list\")\n return res\n del res[:]\n idx = 0\n while idx < nlines:\n line = lines[idx]\n cpos = line.find(\":\")\n if cpos > 0:\n profno = YAPI._atoi((line)[0: 0 + cpos])\n if profno > 1:\n res.append(line)\n idx = idx + 1\n\n return res", "def attentesProcessus(self): \n\t\tprint(\"Affichage des attentes entre processus :\")\n\t\tattentes = self.a.attentesEntreProcessus()\n\t\tfor attente in attentes:\n\t\t\tprint(str(attente[0]) + \" attend \" + str(attente[1]))", "def get_programs(conn) :\n\n cur = conn.cursor() # database table cursor\n\n # get all programs in the database\n cur.execute(\"SELECT program_name, description, cmd_line_prefix FROM programs\") \n progs = cur.fetchall()\n\n return progs", "def listadoPermisosNoAsignados(idproyecto, idrol):\n #permisos = db_session.query(Permiso).join(Recurso, Recurso.id == Permiso.id_recurso).join(Proyecto, Proyecto.id == Recurso.id_proyecto).filter(Proyecto.id == idproyecto).filter(~Permiso.id.in_(db_session.query(Permiso.id).join(RolPermiso, RolPermiso.id_permiso == Permiso.id).filter(RolPermiso.id_rol == idrol))).all()\n permisos = db_session.query(Permiso).from_statement('SELECT p.id FROM permiso p' +\n 
' JOIN recurso r ON r.id = p.id_recurso JOIN ('+\n ' (SELECT rec.id AS recu, pr.id AS proy FROM recurso rec'+\n ' JOIN proyecto pr ON pr.id = rec.id_proyecto'+ \n ' WHERE pr.id= '+str(idproyecto)+')'+\n ' UNION ALL'+\n ' (SELECT recur.id AS recu, pro.id AS proy FROM recurso recur'+\n ' JOIN fase fa ON fa.id = recur.id_fase'+\n ' JOIN proyecto pro ON pro.id = fa.id_proyecto'+ \n ' WHERE pro.id = '+str(idproyecto)+')) AS all_rec'+\n ' ON r.id = all_rec.recu'+\n ' WHERE all_rec.proy = '+str(idproyecto) +\n ' AND p.id NOT IN (SELECT per.id FROM permiso per'+ \n ' JOIN rol_permiso rp ON rp.id_permiso = per.id'\n ' WHERE rp.id_rol = '+str(idrol)+')').all()\n return permisos", "def usuarios_conectados():\n\n global my_user\n print(\"Actualizando clientes conectados.\")\n usuarios = api.get_AllUser()\n lista_usarios = []\n\n for user in usuarios:\n if user['Estado'] == '1':\n # Anadimos todos los users menos el propio.\n if user['Nombre'] != my_user:\n lista_usarios.append(user['Nombre'])\n\n if len(lista_usarios) == 0:\n lista_usarios = ['- Vacio -']\n\n return lista_usarios", "def _get_all_programs(self):\n programs = []\n\n for program in self.config['programs']:\n programs.append( program )\n return programs", "def recargarDominiosPublicamentePermitidos(self):\n modulo_logger.log(logging.DEBUG, \"Recargando dominios publicamente \"\n \"permitidos\")\n conexion = sqlite3.connect(config.PATH_DB)\n cursor = conexion.cursor()\n self.dominios_publicamente_permitidos = []\n respuesta = cursor.execute(\n 'select url from dominios_publicamente_permitidos'\n ).fetchall()\n for fila in respuesta:\n self.dominios_publicamente_permitidos.append(fila[0])\n conexion.close()", "def get_programs() :\n\n [prog_names, descriptions, cmd_line_prefixes] = db.get_programs()\n\n return [prog_names, descriptions, cmd_line_prefixes]", "def _find_tac_member_partners(self):\n\n sql = \"\"\"\nSELECT Partner_Code\n FROM PiptUserTAC AS putac\n JOIN Partner AS p ON putac.Partner_Id = p.Partner_Id\n WHERE PiptUser_Id=%(user_id)s\n \"\"\"\n df = self._query(sql, params=dict(user_id=self._user_id))\n\n return [pc for pc in df[\"Partner_Code\"].tolist()]", "def imprime_lista(self):\n print(\"\\n\\n\" + str(self.simulador.tempo) + \">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\\n\")\n for eve in self.lista:\n print(eve)\n print(\"\\n\\n<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\\n\")", "def get_current_list_of_professors(driver):\n results = []\n list_elems = driver.find_elements_by_xpath(\"//li[contains(@id, 'my-professor')]\")\n for li in list_elems:\n link = li.find_element_by_tag_name('a')\n url = link.get_attribute('href')\n name = link.find_element_by_class_name('name').text.split('\\n')[0]\n last, first = name.split(', ', 1)\n results.append((first + ' ' + last, url))\n return results", "def listaUsuariosDeComite(self, proyecto):\n #Estilos de la tabla para cabeceras y datos\n thead = self.estiloHoja['Heading5']\n thead.alignment=TA_CENTER\n tbody = self.estiloHoja[\"BodyText\"]\n tbody.alignment=TA_LEFT\n cabecera = [Paragraph('Nick',thead),Paragraph('Nombre',thead),Paragraph('Apellido',thead),Paragraph('Email',thead),Paragraph('Estado',thead)]\n contenido = [cabecera]\n lista = MgrComite().miembrosComite(proyecto.nombre)\n tabla = Table(contenido)\n for u in lista:\n contenido.append([Paragraph(u.name,tbody), Paragraph(u.nombre,tbody), Paragraph(u.apellido,tbody), Paragraph(u.email,tbody), Paragraph(u.estado,tbody)])\n tabla = Table(contenido) \n tabla.setStyle(self.tablaStyle)\n return tabla" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if all of the project's phases are in the Development state
def proyectoIniciado(self, nombre):
    proyecto = self.filtrar(nombre)
    for fase in proyecto.listafases:
        if fase.estado != "Desarrollo":
            return False
    return True
[ "def proyectoFinalizado(self, nombre):\n proyecto = self.filtrar(nombre)\n for fase in proyecto.listafases:\n if fase.estado != \"Finalizado\":\n return False\n return True", "def done_exposing(self):\n self.logger.debug(\"Checking if observation has exposures: {}/{}\".format(self.exp_num + 1, len(self.exposures)))\n\n if len(self.exposures) > 0:\n self._done_exposing = all([exp.images_exist for exp in self.exposures])\n else:\n self._done_exposing = False\n\n return self._done_exposing", "def isDissociation(self):\n return len(self.reactants) == 1 and len(self.products) > 1", "def _check_evaled(self, refs):\n bools = []\n for ref in refs:\n bools.append(self.dep_graph[ref]['evaluated'])\n return all(bools)", "def has_life(self):\n num_alive = self.board.sum()\n return num_alive > 0", "def dying(self):\n return len(self.death_sprites) > int(self.death_sprite_current) >= 0", "def is_done(self) -> bool:\n n_completed = 0\n final_depth = self._get_depth_of(self.fidelities[-1])\n for trial in self.lineages.get_trials_at_depth(final_depth):\n n_completed += int(trial.status == \"completed\")\n\n return n_completed >= self.population_size", "def has_grad(self) -> bool:\n return self.check_sensi_orders((1,), MODE_FUN)", "def isDestroyed(self):\n if self.missiles <= 0 and self.mainBattery <= 0 \\\n and self.secondaryBattery <= 0 and self.tread <= 0:\n self.destroyed = True\n self.status = Status.DESTROYED\n else:\n self.destroyed = False\n self.status = Status.NORMAL\n return self.destroyed", "def is_loss(self) -> bool:\n return (self.cost - self.fees) <= 0", "def professors_full():\n return np.sum(prof_avail) == 0", "def is_free(self) -> bool:\n return self.price_overview.final == 0", "def ok(self):\n return self.salida == 0", "def needs_being_erased(self) -> bool:\n project_model = self.project_or_none()\n return False if project_model is None else not project_model.status.is_drawn", "def endgame(self):\n\n j1, j2, vacio = self.contar_fichas()\n\n if j1 == 0 or j2 == 0 or vacio == 0:\n\n self.completado = True\n return True\n\n if self.generarJugadasPosibles(1) == [] and self.generarJugadasPosibles(2) == []:\n\n self.completado = True\n return True\n\n return False", "def is_finished(self):\n return len(self.legalMoves) == 0", "def consider_deactivation(self):\n pass", "def is_indel(self):\n is_sv = self.is_sv\n\n if len(self.REF) > 1 and not is_sv:\n return True\n for alt in self.ALT:\n if alt is None:\n return False\n if alt.type != \"SNV\" and alt.type != \"MNV\":\n return False\n elif len(alt) != len(self.REF):\n # the diff. b/w INDELs and SVs can be murky.\n if not is_sv:\n # 1\t2827693\t.\tCCCCTCGCA\tC\t.\tPASS\tAC=10;\n return True\n else:\n # 1\t2827693\t.\tCCCCTCGCA\tC\t.\tPASS\tSVTYPE=DEL;\n return False\n return False", "def has_completed_every_train(self):\n return len(self.trains_queue) == 0 and all([train.has_finished() for train in self.trains])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if all of the project's phases are in the Finished state
def proyectoFinalizado(self, nombre):
    proyecto = self.filtrar(nombre)
    for fase in proyecto.listafases:
        if fase.estado != "Finalizado":
            return False
    return True
[ "def has_finished_provenance(self):\n return len(self._pending) < self._pending_count", "def check_finished(self):\n if self.max_iterations == -1:\n return False\n return self.iterations >= self.max_iterations", "def is_finished(self):\n return len(self.legalMoves) == 0", "def finished(self):\n return (self.pc >= len(self.program))", "def is_done(self) -> bool:\n n_completed = 0\n final_depth = self._get_depth_of(self.fidelities[-1])\n for trial in self.lineages.get_trials_at_depth(final_depth):\n n_completed += int(trial.status == \"completed\")\n\n return n_completed >= self.population_size", "def has_completed_every_train(self):\n return len(self.trains_queue) == 0 and all([train.has_finished() for train in self.trains])", "def endgame(self):\n\n j1, j2, vacio = self.contar_fichas()\n\n if j1 == 0 or j2 == 0 or vacio == 0:\n\n self.completado = True\n return True\n\n if self.generarJugadasPosibles(1) == [] and self.generarJugadasPosibles(2) == []:\n\n self.completado = True\n return True\n\n return False", "def _is_finished(self):\n if self.__screen.should_end():\n # pressed exit key\n self.exit_game(*GameRunner.QUITE_ALERT)\n if self.__lives <= 0:\n # out of lives\n self.exit_game(*GameRunner.LOST_ALERT)\n if len(self.__asteroids) == 0:\n # no more asteroids\n self.exit_game(*GameRunner.WIN_ALERT)", "def is_finished(self):\n return self.current_element == len(self.work_data)", "def finished(self):\n # type: () -> bool\n return self._status is None", "def is_finish(self):\n return self.action.is_finish", "def is_finished(self):\n return self._state == \"STATE_FINISHED\" or self._state == \"STATE_EXCEPTION\" or self._state == \"STATE_INIT\"", "def finished(self):\n return self.board == self.goal", "def is_dataset_finished(self):\n n_completed = self.dataset.processed\n\n # Dataset has reached max pipeline depth\n if self.dataset.depth >= self.max_pipeline_depth:\n LOGGER.info('Dataset {} has reached max pipeline depth!'.format(self.dataset))\n return True\n\n # No budget for dataset\n if n_completed >= self.dataset.budget:\n LOGGER.info('Algorithm budget for dataset {} has run out!'.format(self.dataset))\n return True\n\n return False", "def isWorkflowFinished(self):\n if not self.tasks:\n return False\n\n for taskInfo in viewvalues(self.tasks):\n if not taskInfo.isTaskCompleted():\n return False\n return True", "def isDone(self):\n ## If the process has not been started yet, then return False\n if not self.started:\n return False\n\n return True", "def is_done(self) -> bool:\n return self.status in (JobStatus.Completed, JobStatus.Deleted)", "def is_finished(self):\n return self.lives == 0 or all(char in self.guesses for char in self.word)", "def game_is_finished():\n for room in ALLROOMS.values():\n if not room.needs_complete(): # any room is incomplete, not done\n return False\n return True # all rooms complete, game is finished" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assigns a user to the project
def asignarUsuario(self, proyecto, user, rol):
    if user in proyecto.users:
        return ":NO asigno usuario: el usuario ya es miembro del proyecto"
    if rol in user.roles:
        return ":NO asigno el usuario: el usuario ya tiene asignado el rol"
    else:
        user.estado = "Activo"
        # asigna el rol al usuario
        user.roles.append(rol)
        # asigna el usuario al proyecto
        proyecto.users.append(user)
        db.session.commit()
        return ":asigno el usuario => " + user.name + "al proyecto => " + proyecto.nombre + " con el rol => " + rol.nombre + ":"
[ "def agregarUsuario(self, id_proyecto, id_usuario):\n try:\n usuario = DBSession.query(Usuario).get(id_usuario)\n proyecto = DBSession.query(Proyecto).get(id_proyecto)\n usuario.proyectos.append(proyecto)\n DBSession.flush()\n transaction.commit()\n except IntegrityError:\n transaction.abort()\n flash(_(\"No se ha asignado dicho Usuario! Hay Problemas con el servidor...\"), 'error')\n redirect(\"/admin/proyecto/usuarioProyecto\", id_proyecto=id_proyecto)\n except SQLAlchemyError:\n flash(_(\"No se ha asignado dicho Usuario! SQLAlchemyError...\"), 'error')\n redirect(\"/admin/proyecto/usuarioProyecto\", id_proyecto=id_proyecto)\n except (AttributeError, NameError):\n flash(_(\"No se ha asignado decho Usuario! Hay Problemas con el servidor...\"), 'error')\n redirect(\"/admin/proyecto/usuarioProyecto\", id_proyecto=id_proyecto)\n else:\n flash(_(\"Usuario asignado!\"), 'ok')\n\n redirect(\"/admin/proyecto/usuarioProyecto\",id_proyecto=id_proyecto)", "def press_Registrar():\n\n usuario = app.entry(\"Usuario\")\n hashPassword = secure.hashingPassword(app.entry(\"Contrasenia\"))\n\n # Creamos el usuario con su nombre, password hasheada y su salt.\n # Cuando realice el login, se actualizaran los datos IP, Puerto, estadoUser,...\n if api.create_User(usuario, hashPassword):\n app.infoBox(\"Registrado con exito\", \"Te registraste como: \"+usuario)\n else:\n app.errorBox(\"Error en el registro\", \"Hubo un error a la hora de registrarse \")", "def signup():\n\n global active_user\n req = request.get_json(force=True, silent=True)\n username = req.get('username')\n password = req.get('password')\n # print(user, password)\n try:\n user = User.get(user_id=username)\n if not user:\n print('i was here')\n # create_user(userid = user, password = password)\n User(user_id=username, password=password)\n active_user = username\n return \"SUCESSS, Your ID is created\"\n else:\n return \"FALIURE, Your ID was already taken\"\n except Exception as e:\n return str(e)", "def post(self,Utilisateur,mdp):\r\n return createUser(login,Utilisateur,mdp,\"\")", "def post_create_user(self, user_name, password, osutils):", "def register(request):\n\tif request.method == \"POST\":\n\t\tform = UserRegisterForm(request.POST)\n\t\tif form.is_valid():\n\t\t\trequest.session['username'] = form.cleaned_data['userName']\n \t \trequest.session['password'] = form.cleaned_data['userPwd']\n \t\temail = form.cleaned_data['userEmail']\n\t\t\t# Work around - login as admin keystone client for user creation\n\t\t\t# then logged into new user from projects page arrival\n\t\t\tapi.joinTenant('admin', 'admin', 'demo')\n\t\t\t# register user with keystone\n\t\t\tapi.registerUser(request.session['username'], request.session['password'], email)\t\n\t\t\t# login as new user\n\t\t\t#api.login(request.session['username'], request.session['password'])\n\treturn HttpResponseRedirect('/projects/')", "def enterProject(request):\n if request.method == 'POST':\n form = TenantLoginForm(request.POST)\n if form.is_valid():\n tenantName = form.cleaned_data['tenantName']\n\t\t\trequest.session['tenant'] = tenantName\n\t\t\ttenantID = form.cleaned_data['tenantID']\n\t\t\t\n\t\t\t# send session user/pw and selected tenant to keystone\n\t\t\tapi.joinTenant(request.session['username'], request.session['password'], tenantName)\n\t\t\treturn HttpResponseRedirect('/project_space/manage')\n\tprint('Invalid User')\n\treturn HttpResponseRedirect('/projects/')", "def signup(self, request, user):\n user.first_name = self.cleaned_data['first_name']\n user.last_name = 
self.cleaned_data['last_name']\n\n if request.session.has_key('unfinished_checkout'):\n\n user.checkout_product_pk=\\\n request.session['unfinished_product_pk']\n logger.info('Benutzer [%s] wird gespeichert mit Wunsch: [%s]'\n % (user, user.checkout_product_pk))\n user.save()", "def eliminar_usuario_proyecto(self, id_proyecto, id_usuario, **kw):\n try:\n usuario = DBSession.query(Usuario).get(id_usuario)\n proyecto = DBSession.query(Proyecto).get(id_proyecto)\n\n if usuario.proyectos.count(proyecto) >= 1: \n usuario.proyectos.remove(proyecto)\n\n DBSession.flush()\n transaction.commit()\n except IntegrityError:\n transaction.abort()\n flash(_(\"No se ha desasignado dicho Usuario! Hay Problemas con el servidor...\"), 'error')\n redirect(\"/admin/proyecto/usuarios\", id_proyecto=id_proyecto)\n except SQLAlchemyError:\n flash(_(\"No se ha desasignado dicho Usuario! SQLAlchemyError...\"), 'error')\n redirect(\"/admin/proyecto/usuarios\", id_proyecto=id_proyecto)\n except (AttributeError, NameError):\n flash(_(\"No se ha desasignado decho Usuario! Hay Problemas con el servidor...\"), 'error')\n redirect(\"/admin/proyecto/usuarios\", id_proyecto=id_proyecto)\n else:\n flash(_(\"Usuario desasignado!\"), 'ok')\n\n redirect(\"/admin/proyecto/usuarios\", id_proyecto=id_proyecto)", "def adduser(username, accesskey, secretkey, pkname=None):\n settings.add_user(username, accesskey, secretkey, pkname)", "def agregarRolUser(self, id_usuario, id_rol, id_proyecto):\n try:\n rol = DBSession.query(Rol).get(id_rol)\n usuario = DBSession.query(Usuario).get(id_usuario)\n rol.usuarios.append(usuario)\n DBSession.flush()\n transaction.commit()\n except IntegrityError:\n transaction.abort()\n flash(_(\"No se pudo Asignar Rol! Hay Problemas con el servidor...\"), 'error')\n redirect(\"/admin/proyecto/rolesProyectoUsuario\", id_proyecto=id_proyecto, id_usuario=id_usuario)\n except SQLAlchemyError:\n flash(_(\"No se pudo Asignar Rol! SQLAlchemyError...\"), 'error')\n redirect(\"/admin/proyecto/rolesProyectoUsuario\", id_proyecto=id_proyecto, id_usuario=id_usuario)\n except (AttributeError, NameError):\n flash(_(\"No se pudo Asignar Rol! 
Hay Problemas con el servidor...\"), 'error')\n redirect(\"/admin/proyecto/rolesProyectoUsuario\", id_proyecto=id_proyecto, id_usuario=id_usuario)\n else:\n flash(_(\"Rol asignado!\"), 'ok')\n\n redirect(\"/admin/proyecto/rolesProyectoUsuario\", id_proyecto=id_proyecto, id_usuario=id_usuario)", "def sign_up(request, email, password, first_name, last_name):\n\n UserService.create(email, password, first_name, last_name)\n return IdentityService.sign_in(request, email, password)", "def test_view_inactivar_usuario(self):\n # se loguea el usuario testuser\n user = self.client.login(username='testuser', password='test')\n self.assertTrue(user)\n # se crea un usuario\n user = User.objects.create_user(username='user_prueba', email='test@test4.com', password='prueba')\n usuario_prueba = Usuario.objects.create(user=user, telefono='222', direccion='Avenida')\n # se marca al usuario como inactivo\n usuario_prueba.user.is_active = False\n usuario_prueba.save()\n\n self.assertEqual(usuario_prueba.user.is_active, False)\n\n print 'Test de inactivar_usuario realizado exitosamente'", "def asignarpermiso():\n if not current_user.is_authenticated():\n flash('Debe loguearse primeramente!!!!', 'loggin')\n return render_template('index.html')\n \n permission = UserRol('ADMINISTRADOR')\n if permission.can(): \n #=======================================================================\n # desde aqui : Ejecuta esta seccion la primera vez que entra, cuando selecciona solo el usuario\n #=======================================================================\n idproyecto = request.args.get('idproyecto')\n idrol = request.args.get('idrol')\n rolobtenido = db_session.query(Rol).filter_by(id=idrol).first()\n if not rolobtenido == None:\n if str(rolobtenido.codigo) == 'ADMINISTRADOR':\n flash('El rol no tiene permisos', 'info')\n return redirect('/administrarrol')\n if request.method == 'POST':\n rol = request.form.get('idrol')\n permisos = request.form.getlist('permisos')\n #=======================================================================\n # Inserta los permisos seleccionados\n #=======================================================================\n for p in permisos :\n rolper = RolPermiso(rol, p)\n exits = db_session.query(RolPermiso).filter_by(id_rol=rol, id_permiso=p).first()\n if not exits:\n db_session.merge(rolper)\n db_session.commit()\n flash('Permisos asignados correctamente', 'info')\n return redirect('/administrarrol')\n if idproyecto == None:\n #significa que no se selecciono proyecto\n #obtiene permisos de un rol\n permisos = getPermisosByRol(idrol)\n if len(permisos) > 0:\n #si tiene permisos muestra directamente permisos del proyecto\n #obtiene idproyecto del permiso que tenga especificado una fase\n idproyecto = getProyectoByPermiso(permisos)\n #obtiene permisos no asignados al rol\n permisos = listadoPermisosNoAsignados(idproyecto, idrol)\n return render_template('rol/asignarpermisos.html', permisos = permisos, idrol = idrol, idproyecto = idproyecto)\n else : \n #si no tiene permisos asignados \n proyectos = db_session.query(Proyecto).order_by(Proyecto.nombre);\n return render_template('proyecto/seleccionproyecto.html', proyectos=proyectos, idrol=idrol)\n else:\n permisos = getPermisosByProyecto(idproyecto)\n return render_template('rol/asignarpermisos.html', permisos = permisos, idrol = idrol, idproyecto = idproyecto)\n else:\n flash('Sin permisos para asignar permisos al rol', 'permiso')\n return render_template('index.html')", "def agregarUsuario(self, usuario, 
password):\r\n\r\n\t\t\td=ConnectionDBClass.ConnectionDBClass(dbConnection)\r\n\t\t\tconn=d.connection_sqlite()\r\n\t\t\tusrConn = UsuariosClass.UsuariosClass(conn)\r\n\t\t\treturn usrConn.insert_user(usuario, password)", "def create_upload_user():\n url = '%s/%s' % (UPLOAD_ADMIN_URL, upload_user)\n print(url)\n r = requests.delete(url,\n auth=(upload_admin, upload_admin_pwd), verify=False)\n if r.status_code == 200 or r.status_code == 404:\n r = requests.post(\n '%s/%s/%s' % (UPLOAD_ADMIN_URL, upload_user, project),\n auth=(upload_admin, upload_admin_pwd), verify=False)\n if r.status_code == 200:\n global upload_user_pass\n upload_user_pass = r.json()['password']\n return r", "def add_user():\n\n username = request.form.get(\"username\")\n password = request.form.get(\"password\")\n\n new_user = User(email=username,\n password=password)\n\n db.session.add(new_user)\n db.session.commit()\n\n flash('Logged in')\n return redirect(\"/\")", "def save_user(form_instance):\r\n username = form_instance.cleaned_data['username']\r\n email = form_instance.cleaned_data['email']\r\n password = form_instance.cleaned_data['password1']\r\n new_user = User.objects.create_user(username, email, password)\r\n new_user.save()\r\n return new_user", "def principalForUser(user):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the users of a project
def usersDeProyecto(self, nombre):
    proyecto = self.filtrar(nombre)
    return proyecto.users
[ "def usuarios_conectados():\n\n global my_user\n print(\"Actualizando clientes conectados.\")\n usuarios = api.get_AllUser()\n lista_usarios = []\n\n for user in usuarios:\n if user['Estado'] == '1':\n # Anadimos todos los users menos el propio.\n if user['Nombre'] != my_user:\n lista_usarios.append(user['Nombre'])\n\n if len(lista_usarios) == 0:\n lista_usarios = ['- Vacio -']\n\n return lista_usarios", "def listarUsuarios(self):\r\n\r\n\t\t\td=ConnectionDBClass.ConnectionDBClass(dbConnection)\r\n\t\t\tconn=d.connection_sqlite()\r\n\t\t\tusrConn = UsuariosClass.UsuariosClass(conn)\r\n\t\t\treturn usrConn.listar_usuarios()", "def listaUsuariosDeProyecto(self, proyecto):\n thead = self.estiloHoja['Heading5']\n thead.alignment=TA_CENTER\n tbody = self.estiloHoja[\"BodyText\"]\n tbody.alignment=TA_LEFT\n cabecera = [Paragraph('Nick',thead),Paragraph('Nombre',thead),Paragraph('Apellido',thead),Paragraph('Email',thead),Paragraph('Estado',thead),Paragraph('Rol en el Proyecto',thead)]\n contenido = [cabecera]\n lista = MgrProyecto().usersDeProyecto(proyecto.nombre)\n tabla = Table(contenido)\n for u in lista:\n rol = MgrUser().rolDeUser(u, proyecto.nombre)\n contenido.append([Paragraph(u.name,tbody), Paragraph(u.nombre,tbody), Paragraph(u.apellido,tbody), Paragraph(u.email,tbody), Paragraph(u.estado,tbody), Paragraph(rol.nombre,tbody)])\n tabla = Table(contenido) \n tabla.setStyle(self.tablaStyle)\n return tabla", "def colaboradores(idpiz):\n colaboradores= []\n act= Actividad.objects.filter(idpizactividad= idpiz, is_active = True).distinct('loginasignado')\n for elem in act:\n persona = elem.loginasignado\n usuario = User.objects.get(username= persona)\n if usuario.is_active == True:\n\t colaboradores.append(usuario)\n\n return colaboradores", "def get_users():\n return [x.pw_name for x in pwd.getpwall() if user_valid(x)]", "def listaUsuariosDeComite(self, proyecto):\n #Estilos de la tabla para cabeceras y datos\n thead = self.estiloHoja['Heading5']\n thead.alignment=TA_CENTER\n tbody = self.estiloHoja[\"BodyText\"]\n tbody.alignment=TA_LEFT\n cabecera = [Paragraph('Nick',thead),Paragraph('Nombre',thead),Paragraph('Apellido',thead),Paragraph('Email',thead),Paragraph('Estado',thead)]\n contenido = [cabecera]\n lista = MgrComite().miembrosComite(proyecto.nombre)\n tabla = Table(contenido)\n for u in lista:\n contenido.append([Paragraph(u.name,tbody), Paragraph(u.nombre,tbody), Paragraph(u.apellido,tbody), Paragraph(u.email,tbody), Paragraph(u.estado,tbody)])\n tabla = Table(contenido) \n tabla.setStyle(self.tablaStyle)\n return tabla", "def listUsers():\n exec_get_all('SELECT username FROM users')", "def get_users(session, tg_host, org_id):\r\n url = f\"https://{tg_host}/api/v3/organizations/{org_id}/users\"\r\n return session.get(url)", "def get_users(self):\r\n sql = \"SELECT * FROM user WHERE auth <> 'root' LIMIT \" + str(self.user_per_page) + \" OFFSET \" + str(self.offset)\r\n self.cur.execute(sql)\r\n data = self.cur.fetchall()\r\n return data", "def get_users(self):\n\n username_regex = (\n r\"^username\\s+\\\"(?P<username>\\S+)\\\"\\s+password\\s+(?P<pwd_hash>[0-9a-f]+).*\"\n )\n\n raw_show_users_accounts = self._send_command(\"show users accounts\")\n show_users_accounts = textfsm_extractor(\n self, \"show_users_accounts\", raw_show_users_accounts\n )\n users = {}\n for user in show_users_accounts:\n users[user[\"username\"]] = {\n \"level\": int(user[\"priv\"]),\n \"password\": \"\",\n \"sshkeys\": [],\n }\n\n command = \"show running-config | section username\"\n output = 
self._send_command(command)\n\n for match in re.finditer(username_regex, output, re.M):\n username = match.groupdict()[\"username\"]\n pwd_hash = match.groupdict()[\"pwd_hash\"]\n users[username][\"password\"] = pwd_hash\n\n return users", "def all_usuarios(cls, request):\n query_usuarios = request.dbsession.query(User).filter_by().all()\n\n return query_usuarios", "def get_all_users():\n return get_user_model().objects.all()", "def selectRoleU(self):\n cursor = self.bbdd.cursor()\n cursor.execute(\"select role from Usuarios\")\n listaRoles=list()\n for codu in cursor:\n listaRoles.append(codu)\n return listaRoles", "def get_all_usernames():\n return list(map(lambda u: u.username, get_all_users()))", "def show_all_users():\n current_user_role = get_jwt_identity()['role']\n url = app.config[\"USERS_URL\"]\n user_service = UserProxyAccess(url)\n response = user_service.get_all_users()\n return jsonify(response), 200", "def obtenerNombre(self):\n\t\treturn self.usuario;", "def get_all():\n return list(User.objects.all())", "def get_users() -> list:\n ans = DatabaseConnector.get_values(\"SELECT * FROM user ORDER BY registry_date DESC \")\n\n return ans", "def get_usuario(self):\r\n return self.usuario" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Looks up a project by Id
def filtrarXId(self, idProyecto):
    return Proyecto.query.filter(Proyecto.idProyecto == idProyecto).first_or_404()
[ "def siguiente(self,id):\n consulta = \"select * from socios m \" \\\n \"where m.idsocio = (select min(idsocio) from socios s \" \\\n \"where s.idsocio > %s);\"\n try:\n datos = AccesoDatos()\n cur = datos.conectar()\n cur.execute(consulta,(id))\n d = cur.fetchone()\n socio = Socio(d[1],d[2],d[3],d[0])\n datos.desconectar()\n except OperationalError as e:\n raise Exception(\"ERROR FATAL\")\n except Exception as a:\n raise Exception(\"Error al conectar a la base de datos\")\n print(\"ID : \", socio.id, \"\\nNombre: \", socio.nombre, \"\\nApellido: \", socio.apellido, \"\\nDNI: \", socio.dni)", "def filtrar(self, nombre):\n return Proyecto.query.filter(Proyecto.nombre == nombre).first_or_404()", "def get_id(self):\n\t\ttry:\n\t\t\tself.id = self.personaje['data']['results'][0]['id']\n\t\t\tprint(self.id)\n\t\texcept:\n\t\t\tprint('llama primero get_personaje')", "def get_by_id(self, id_socio):\n return self.datos.buscar(id_socio)", "def obtener(id: int):\n producto_por_canasta = None\n try:\n #Conexión a la base de datos usando ruta de archivo de configuración\n db = sqlite3.connect(Config.get(\"DATABASE\"), detect_types=sqlite3.PARSE_DECLTYPES)\n #Objeto cursor\n cursor = db.cursor()\n query = \"\"\"SELECT * FROM productos_por_canasta WHERE id = '{}'\"\"\".format(id)\n #Imprimir query a ejecutar\n print(\"\\033[38;5;57m\" + \"\\033[1m\" + query + \"\\033[0m\")\n cursor.execute(query)\n producto_por_canasta = cursor.fetchone()\n except Exception as e:\n print(\"Error: {}\".format(e))\n finally:\n #Cerrar conexión de db\n db.close()\n if producto_por_canasta == None:\n print(\"No se pudo encontrar un producto_por_pedido con el id:\", id)\n else:\n return ProductoPorCanasta(id = producto_por_canasta[0], producto = Producto.obtener(producto_por_canasta[1]), canasta = Canasta.obtener(producto_por_canasta[2]), cantidad = producto_por_canasta[3], fecha_de_creacion = producto_por_canasta[4], fecha_de_actualizacion = producto_por_canasta[5])", "def get_sujeto_at(self, id_sujeto):\n contador = 0\n for sujeto in self.lista_general:\n #print(\"s: \", sujeto.get_nombre(), \" \", contador, \" - \", id_sujeto)\n if contador == id_sujeto-1:\n return (0, sujeto.get_nombre())\n contador += 1\n return (-1, \"Sujeto no encontrado...\")", "def ver_actividades_proyecto(request, flujo_id, proyecto_id):\n proyecto = get_object_or_404(Proyecto, id=proyecto_id)\n flujo = get_object_or_404(Flujo, id=flujo_id)\n user = User.objects.get(username=request.user.username)\n userRolProy = UsuarioRolProyecto.objects.filter(proyecto=proyecto_id)\n roles = UsuarioRolProyecto.objects.filter(usuario = user, proyecto = proyecto).only('rol')\n permisos_obj = []\n for i in roles:\n permisos_obj.extend(i.rol.permisos.all())\n permisos = []\n for i in permisos_obj:\n permisos.append(i.nombre)\n fluActProy = FlujoActividadProyecto.objects.filter(flujo = flujo_id, proyecto = proyecto_id).order_by('orden')\n actList = {}\n ultActividad = 0\n for rec in fluActProy:\n if not actList.has_key(rec.flujo.id):\n actList[rec.flujo.id] = {}\n if not actList[rec.flujo.id].has_key(int(rec.orden)):\n actList[rec.flujo.id][int(rec.orden)] = {}\n if not actList[rec.flujo.id][int(rec.orden)].has_key(rec.actividad.id):\n actList[rec.flujo.id][int(rec.orden)][rec.actividad.id] = []\n act = Actividad.objects.get(nombre = rec.actividad)\n actList[rec.flujo.id][int(rec.orden)][rec.actividad.id].append(act.nombre)\n actList[rec.flujo.id][int(rec.orden)][rec.actividad.id].append(act.descripcion)\n ultActividad = int(rec.orden)\n if actList:\n actDict = 
actList[int(flujo_id)]\n else:\n actDict = None\n lista = User.objects.all().order_by(\"id\")\n proyPend = False\n if proyecto.estado == 1:\n proyPend = True\n ctx = {'flujo':flujo,\n 'proyecto':proyecto,\n 'actividades':actDict,\n 'proyPend':proyPend,\n 'ultActividad':ultActividad,\n 'ver_flujo': 'ver flujo' in permisos,\n 'asignar_actividades_proyecto': 'asignar actividades proyecto' in permisos\n }\n return render_to_response('proyectos/admin_actividades_proyecto.html', ctx, context_instance=RequestContext(request))", "def affaire_by_id_view(request):\n # Check connected\n if not check_connected(request):\n raise exc.HTTPForbidden()\n\n id = request.matchdict['id']\n query = request.dbsession.query(VAffaire)\n one = query.filter(VAffaire.id == id).first()\n return Utils.serialize_one(one)", "def get_id(idd: int) -> Cliente:\n conn = GenericDao.connect()\n cursor = conn.execute(\"SELECT * FROM clientes where id = ?\", (str(idd),))\n row = cursor.fetchone()\n cliente = Cliente(row[1], row[2], row[3], row[4], row[5], row[0])\n conn.close()\n if debug:\n print(str(cliente))\n return cliente", "def obtener(id: int):\n producto = None\n try:\n #Conexión a la base de datos usando ruta de archivo de configuración\n db = sqlite3.connect(Config.get(\"DATABASE\"), detect_types=sqlite3.PARSE_DECLTYPES)\n #Objeto cursor\n cursor = db.cursor()\n query = \"\"\"SELECT * FROM productos WHERE id = '{}'\"\"\".format(id)\n #Imprimir query a ejecutar\n print(\"\\033[38;5;57m\" + \"\\033[1m\" + query + \"\\033[0m\")\n cursor.execute(query)\n producto = cursor.fetchone()\n #query = \"\"\"SELECT categorias.* FROM categorias JOIN productos ON\n # categorias.id = productos.id_categoria WHERE productos.id = {}\"\"\".format(id)\n #cursor.execute(query)\n #categoria = cursor.fetchone()\n #query = \"\"\"SELECT comercios.* FROM comercios JOIN productos ON comercios.id = productos.id_comercio WHERE productos.id = {}\"\"\".format(id)\n #cursor.execute(query)\n #comercio = cursor.fetchone()\n except Exception as e:\n print(\"Error: {}\".format(e))\n finally:\n #Cerrar conexión de db\n db.close()\n if producto == None:\n print(\"No se pudo encontrar un Producto con el id:\", id)\n else:\n return Producto(id = producto[0], nombre = producto[1], descripcion = producto[2], fotos_url = producto[3].split(\",\"), precio = producto[4], stock = producto[5], fecha_de_creacion = producto[6], fecha_de_actualizacion = producto[7])", "def obtenerDatosId(self, idadi):\n\n # SQL\n consulta1 = \"\"\"\n SELECT adi.add_id, \n p.per_nombres||' '||p.per_apellidos persona,\n adi.nac_id, adi.add_lugarnac, adi.san_id, adi.add_alergias, adi.add_capacdife, adi.add_foto, adi.add_estado\n FROM membresia.admision_adicionales adi\n left join referenciales.personas p on adi.add_id = p.per_id\n where adi.add_id = %s AND adi.add_estado <> false;\n \"\"\"\n\n try:\n\n conexion = Conexion()\n con = conexion.getConexion()\n cur = con.cursor()\n cur.execute(consulta1, (idadi, ))\n\n return cur.fetchone()\n\n except con.Error as e:\n print(e.pgerror)\n return False\n finally:\n if con is not None:\n cur.close()\n con.close()", "def _get_vios_pk_id(context, vios_dom_id, session):\n # Invert the saved mapping\n pk2dom_map = DtoBase_sqla._vios_id_pk2dom_map\n# vios_id_to_pk_id_map = {val: key for key, val in pk2dom_map.iteritems()}\n vios_id_to_pk_id_map = {}\n for key, value in pk2dom_map.iteritems():\n vios_id_to_pk_id_map[value] = key\n if vios_dom_id in vios_id_to_pk_id_map:\n return vios_id_to_pk_id_map[vios_dom_id]\n else:\n filters = {'id': 
vios_dom_id}\n _map_dom_filters(context, filters)\n query = model_query(context, VIO_SERVER_DTO)\n vios_dto = query.filter_by(**filters).first()\n DtoBase_sqla._vios_id_pk2dom_map[vios_dto._pk_id] = vios_dom_id\n return vios_dto._pk_id", "def crear_socio_con_id(self):\n id=input(\"Ingrese la ID del socio a modificar: \")\n dni=input(\"Ingrese dni del socio: \")\n nombre=input(\"Ingrese nombre del socio: \")\n apellido=input(\"Ingrese apellido del socio: \")\n return Socio(dni,nombre,apellido,id)", "def colaboradores(idpiz):\n colaboradores= []\n act= Actividad.objects.filter(idpizactividad= idpiz, is_active = True).distinct('loginasignado')\n for elem in act:\n persona = elem.loginasignado\n usuario = User.objects.get(username= persona)\n if usuario.is_active == True:\n\t colaboradores.append(usuario)\n\n return colaboradores", "def busqueda_get_datos(self):\r\n obj_equipo=equipo()\r\n self.estado=True\r\n while self.estado:\r\n self.mostrar_todos()\r\n #pedirle al usuario que ingrese el nombre del equipo a buscar\r\n try:\r\n id=int(input(Fore.YELLOW+\"Ingresa el ID del equipo: \"+Fore.RESET))\r\n continua=True\r\n except ValueError:\r\n print(Fore.WHITE,Back.RED+\" Ingrese un valor numerico\"+Fore.RESET,Back.RESET)\r\n continua=False\r\n if continua:\r\n #llama a la funcion buscar_registro de la clase conexion_equipos \r\n estatus=self.obj_conexion.obtener_registro(id)\r\n #si el estatus es true\r\n if estatus:\r\n #convierte estatus a una lista \r\n obj_equipo.set_id(estatus[0][0])\r\n obj_equipo.set_nombre(estatus[0][1])\r\n obj_equipo.set_modelo(estatus[0][2])\r\n obj_equipo.set_serie(estatus[0][3])\r\n obj_equipo.set_ip(estatus[0][4])\r\n obj_equipo.set_usuario(estatus[0][5])\r\n obj_equipo.set_password(estatus[0][6])\r\n obj_equipo.set_secret(estatus[0][7])\r\n obj_equipo.conexion()\r\n #llama a get y set de la clase conexion_equipos \r\n print(Fore.GREEN+\" Registro encontrado correctamente\\n\"+Fore.RESET)\r\n tabla=PrettyTable()\r\n tabla.field_names=[\"ID\",\"NOMBRE\",\"MODELO\",\"SERIE\",\"IP\",\"USUARIO\",\"PASSWORD\",\"SECRET\"]\r\n for i in estatus:\r\n tabla.add_row(i)\r\n print(tabla)\r\n while True:\r\n #muestrae el menu secundario\r\n menu_secundario()\r\n opcion=input(Fore.YELLOW+\" Seleccione una opcion: \"+Fore.RESET)\r\n if opcion.upper() in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\"]:\r\n if opcion.upper()==\"1\":\r\n print(Fore.YELLOW+\" Nombre: \"+obj_equipo.hostname()+Fore.RESET)\r\n elif opcion.upper()==\"2\":\r\n print(Fore.YELLOW+\" Usuario: \"+obj_equipo.user()+Fore.RESET)\r\n elif opcion.upper()==\"3\":\r\n print(Fore.YELLOW+\" Password: \"+obj_equipo.clave()+Fore.RESET)\r\n elif opcion.upper()==\"4\":\r\n print(Fore.YELLOW+\" Usuarios: \"+str(obj_equipo.get_usuarios())+Fore.RESET)\r\n elif opcion.upper()==\"5\":\r\n print(Fore.YELLOW+\" Interfaces: \"+ str(obj_equipo.get_interfaces())+Fore.RESET)\r\n elif opcion.upper()==\"6\":\r\n print(Fore.YELLOW+\" Aplicar Configuracion: \"+str(obj_equipo.set_configuracion())+Fore.RESET)\r\n elif opcion.upper()==\"7\":\r\n #rompe el ciclo\r\n self.estado=False\r\n break\r\n else:\r\n print(Fore.WHITE,Back.RED+\" Registro no encontrado\"+Fore.RESET,Back.RESET)", "def datosProyecto(self, proyecto):\n thead = self.estiloHoja['Heading5']\n thead.alignment=TA_CENTER\n tbody = self.estiloHoja[\"BodyText\"]\n tbody.alignment=TA_LEFT\n contenido=[]\n contenido.append([Paragraph('Nombre de Proyecto',thead),Paragraph(proyecto.nombre,tbody)])\n tabla = Table(contenido)\n lider = 
MgrProyecto().getLider(proyecto.nombre)\n contenido.append([Paragraph('Lider de Proyecto',thead),Paragraph(lider,tbody)])\n contenido.append([Paragraph('Estado de Proyecto',thead),Paragraph(proyecto.estado,tbody)])\n contenido.append([Paragraph('Presupuesto de Proyecto',thead),Paragraph(str(proyecto.presupuesto),tbody)])\n contenido.append([Paragraph('Fecha de Creacion de Proyecto',thead),Paragraph(str(proyecto.fechaDeCreacion),tbody)])\n contenido.append([Paragraph('Descripcion del Proyecto',thead),Paragraph(proyecto.descripcion,tbody)])\n comite = MgrComite().search(proyecto.nombre)\n contenido.append([Paragraph('Nombre de Comite del Proyecto',thead),Paragraph(comite.nombre,tbody)])\n contenido.append([Paragraph('Cantidad de Miembros',thead),Paragraph(str(comite.cantMiembro),tbody)])\n tabla = Table(contenido)\n tabla.setStyle(self.tablaStyle)\n return tabla", "def editar(self, id_proyecto, **kw):\n try:\n tmpl_context.form = editar_proyecto_form \n traerProyecto=DBSession.query(Proyecto).get(id_proyecto)\n\n if traerProyecto.iniciado:\n flash(_(\"El proyecto no puede modificarse porque ya se encuentra iniciado.\"), 'error')\n redirect(\"/admin/proyecto/listado\")\n\n kw['id_proyecto']=traerProyecto.id_proyecto\n kw['id_usuario']=traerProyecto.id_usuario\n kw['nombre']=traerProyecto.nombre\n kw['descripcion']=traerProyecto.descripcion\n kw['fecha']=traerProyecto.fecha\n kw['iniciado']=traerProyecto.iniciado\n except SQLAlchemyError:\n flash(_(\"No se pudo acceder a Edicion de Proyectos! SQLAlchemyError...\"), 'error')\n redirect(\"/admin/proyecto/listado\")\n except (AttributeError, NameError):\n flash(_(\"No se pudo acceder a Edicion de Proyecto! Hay Problemas con el servidor...\"), 'error')\n redirect(\"/admin/proyecto/listado\")\n\n return dict(nombre_modelo='Proyecto', page='editar_proyecto', value=kw)", "def test_humangenes_id_get(self):\n pass", "def get_comentarios_perfil(cliente_id):\n comentarios = ComentarioPerfil.objects.filter(cliente__user__id=cliente_id).order_by(\"-fecha\")\n \n return {'comentarios': comentarios}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retorna fases del proyecto
def fasesDeProyecto(self, nombre): proyecto = self.filtrar(nombre) return proyecto.listafases
[ "def createFase():\n # proyecto 1\n p = MgrProyecto().filtrar(\"proyecto1\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem1\")\n f = Fase(nombre=\"proyecto1-fase1\", descripcion=\"nueva fase\", orden=1, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)\n \n p = MgrProyecto().filtrar(\"proyecto1\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem1\")\n f = Fase(nombre=\"proyecto1-fase2\", descripcion=\"nueva fase\", orden=2, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)\n \n p = MgrProyecto().filtrar(\"proyecto1\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem2\")\n f = Fase(nombre=\"proyecto1-fase3\", descripcion=\"nueva fase\", orden=3, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)\n \n p = MgrProyecto().filtrar(\"proyecto1\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem3\")\n f = Fase(nombre=\"proyecto1-fase4\", descripcion=\"nueva fase\", orden=4, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)\n \n # proyecto 2\n p = MgrProyecto().filtrar(\"proyecto2\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem3\")\n f = Fase(nombre=\"proyecto2-fase1\", descripcion=\"nueva fase\", orden=1, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)\n \n p = MgrProyecto().filtrar(\"proyecto2\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem2\")\n f = Fase(nombre=\"proyecto2-fase2\", descripcion=\"nueva fase\", orden=2, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)\n \n p = MgrProyecto().filtrar(\"proyecto2\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem4\")\n f = Fase(nombre=\"proyecto2-fase3\", descripcion=\"nueva fase\", orden=3, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f) \n \n p = MgrProyecto().filtrar(\"proyecto2\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem2\")\n f = Fase(nombre=\"proyecto2-fase4\", descripcion=\"nueva fase\", orden=4, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)", "def nroDeFaseDeProyecto(self, nombre):\n proyecto = self.filtrar(nombre)\n cont = 0\n for i in proyecto.listafases:\n if i != None:\n cont = cont + 1\n return cont", "def createProyecto():\n # crea un proyecto\n p = Proyecto(nombre=\"proyecto1\", descripcion=\"sistema 1\", presupuesto=10000)\n MgrProyecto().guardar(p)\n per = MgrPermiso().filtrarXModulo(\"ModuloGestion\")\n r = Rol(nombre=\"LiderDeProyecto\", descripcion=\"rol de lider\", ambito= p.nombre, permisos=per)\n MgrRol().guardar(r)\n MgrProyecto().asignarLider(proyecto = p , rol = r, nameLider = \"lory\")\n p = MgrProyecto().filtrar(\"proyecto1\")\n c = Comite(nombre=\"comite-proyecto1\", descripcion=\"comite de cambio\", cantMiembro=3, proyectoId=p.idProyecto)\n MgrComite().guardar(c)\n u = MgrProyecto().getUserLider(p.idProyecto)\n MgrComite().asignarUsuario(p,u)\n\n p = Proyecto(nombre=\"proyecto2\", descripcion=\"sistema 2\", presupuesto=20000)\n MgrProyecto().guardar(p)\n per = MgrPermiso().filtrarXModulo(\"ModuloGestion\")\n r = Rol(nombre=\"LiderDeProyecto\", descripcion=\"rol de lider\", ambito= p.nombre, permisos=per)\n MgrRol().guardar(r)\n MgrProyecto().asignarLider(proyecto = p , rol = r, nameLider = \"vavi\")\n p = MgrProyecto().filtrar(\"proyecto2\")\n c = Comite(nombre=\"comite-proyecto2\", descripcion=\"comite de cambio\", cantMiembro=3, proyectoId=p.idProyecto)\n MgrComite().guardar(c)\n u = MgrProyecto().getUserLider(p.idProyecto)\n MgrComite().asignarUsuario(p,u)\n\n p = Proyecto(nombre=\"proyecto3\", descripcion=\"sistema 
3\", presupuesto=30000)\n MgrProyecto().guardar(p)\n per = MgrPermiso().filtrarXModulo(\"ModuloGestion\")\n r = Rol(nombre=\"LiderDeProyecto\", descripcion=\"rol de lider\", ambito= p.nombre, permisos=per)\n MgrRol().guardar(r)\n MgrProyecto().asignarLider(proyecto = p , rol = r, nameLider = \"guille\")\n p = MgrProyecto().filtrar(\"proyecto3\")\n c = Comite(nombre=\"comite-proyecto3\", descripcion=\"comite de cambio\", cantMiembro=3, proyectoId=p.idProyecto)\n MgrComite().guardar(c)\n u = MgrProyecto().getUserLider(p.idProyecto)\n MgrComite().asignarUsuario(p,u)\n \n p = Proyecto(nombre=\"proyecto4\", descripcion=\"sistema 4\", presupuesto=40000)\n MgrProyecto().guardar(p)\n per = MgrPermiso().filtrarXModulo(\"ModuloGestion\")\n r = Rol(nombre=\"LiderDeProyecto\", descripcion=\"rol de lider\", ambito= p.nombre, permisos=per)\n MgrRol().guardar(r)\n MgrProyecto().asignarLider(proyecto = p , rol = r, nameLider = \"stfy\")\n p = MgrProyecto().filtrar(\"proyecto3\")\n c = Comite(nombre=\"comite-proyecto3\", descripcion=\"comite de cambio\", cantMiembro=2, proyectoId=p.idProyecto)\n MgrComite().guardar(c)\n u = MgrProyecto().getUserLider(p.idProyecto)\n MgrComite().asignarUsuario(p,u)\n\n print \":cargo proyectos:\"", "def proyectoFinalizado(self, nombre):\n proyecto = self.filtrar(nombre)\n for fase in proyecto.listafases:\n if fase.estado != \"Finalizado\":\n return False\n return True", "def Inferencias(self):\r\n self.GeneroMasculinoDisfrute = 0\r\n self.GeneroFemeninoDisfrute = 0\r\n self.GeneroMasculinoFacilidad = 0\r\n self.GeneroFemeninoFacilidad = 0\r\n self.EdadValor = 0\r\n self.NumeroUsuarios = 0\r\n self.ValorGeneroFacilidad = \"\"\r\n self.ValorGeneroDisfrute = \"\"\r\n self.valoredad = 0\r\n self.EdadMayor = 0\r\n for i in onto.Usuario.instances(): \r\n self.NumeroUsuarios = 1 + self.NumeroUsuarios\r\n for prop in i.get_properties():\r\n for value in prop[i]:\r\n if prop.python_name == \"Facilidad_percibida_de_uso\" and value>59:\r\n for prop in i.get_properties():\r\n for value in prop[i]:\r\n if prop.python_name == \"Genero\":\r\n if str(value) == \"Masculino\":\r\n self.GeneroMasculinoFacilidad = 1 + self.GeneroMasculinoFacilidad\r\n elif str(value) == \"Femenino\":\r\n self.GeneroFemeninoFacilidad = 1 + self.GeneroFemeninoFacilidad \r\n if self.GeneroMasculinoFacilidad >= self.GeneroFemeninoFacilidad:\r\n self.ValorGeneroFacilidad = \"Masculino\"\r\n if self.GeneroMasculinoFacilidad <= self.GeneroFemeninoFacilidad:\r\n self.ValorGeneroFacilidad = \"Femenino\"\r\n if prop.python_name == \"Disfrute_percibido\" and value>59:\r\n for prop in i.get_properties():\r\n for value in prop[i]:\r\n if prop.python_name == \"Genero\":\r\n if str(value) == \"Masculino\":\r\n self.GeneroMasculinoDisfrute = 1 + self.GeneroMasculinoDisfrute\r\n elif str(value) == \"Femenino\":\r\n self.GeneroFemeninoDisfrute = 1 + self.GeneroFemeninoDisfrute\r\n if self.GeneroMasculinoDisfrute >= self.GeneroFemeninoDisfrute:\r\n self.ValorGeneroDisfrute= \"Masculino\"\r\n if self.GeneroMasculinoDisfrute <= self.GeneroFemeninoDisfrute:\r\n self.ValorGeneroDisfrute = \"Femenino\"\r\n if prop.python_name == \"Edad\":\r\n self.EdadValor = int(value) + self.EdadValor\r\n self.valoredad = int(value)\r\n if self.EdadMayor < self.valoredad:\r\n self.EdadMayor = self.valoredad\r\n\r\n if self.NumeroUsuarios==0:\r\n self.NumeroUsuarios=1\r\n PromedioEdad = (self.EdadValor / self.NumeroUsuarios)\r\n MayorEdad.set(self.EdadMayor) \r\n resultadopromedio.set(round(PromedioEdad))\r\n 
resultadodisfrute.set(self.ValorGeneroDisfrute)\r\n resultadofacilidad.set(self.ValorGeneroFacilidad)", "def ver_actividades_proyecto(request, flujo_id, proyecto_id):\n proyecto = get_object_or_404(Proyecto, id=proyecto_id)\n flujo = get_object_or_404(Flujo, id=flujo_id)\n user = User.objects.get(username=request.user.username)\n userRolProy = UsuarioRolProyecto.objects.filter(proyecto=proyecto_id)\n roles = UsuarioRolProyecto.objects.filter(usuario = user, proyecto = proyecto).only('rol')\n permisos_obj = []\n for i in roles:\n permisos_obj.extend(i.rol.permisos.all())\n permisos = []\n for i in permisos_obj:\n permisos.append(i.nombre)\n fluActProy = FlujoActividadProyecto.objects.filter(flujo = flujo_id, proyecto = proyecto_id).order_by('orden')\n actList = {}\n ultActividad = 0\n for rec in fluActProy:\n if not actList.has_key(rec.flujo.id):\n actList[rec.flujo.id] = {}\n if not actList[rec.flujo.id].has_key(int(rec.orden)):\n actList[rec.flujo.id][int(rec.orden)] = {}\n if not actList[rec.flujo.id][int(rec.orden)].has_key(rec.actividad.id):\n actList[rec.flujo.id][int(rec.orden)][rec.actividad.id] = []\n act = Actividad.objects.get(nombre = rec.actividad)\n actList[rec.flujo.id][int(rec.orden)][rec.actividad.id].append(act.nombre)\n actList[rec.flujo.id][int(rec.orden)][rec.actividad.id].append(act.descripcion)\n ultActividad = int(rec.orden)\n if actList:\n actDict = actList[int(flujo_id)]\n else:\n actDict = None\n lista = User.objects.all().order_by(\"id\")\n proyPend = False\n if proyecto.estado == 1:\n proyPend = True\n ctx = {'flujo':flujo,\n 'proyecto':proyecto,\n 'actividades':actDict,\n 'proyPend':proyPend,\n 'ultActividad':ultActividad,\n 'ver_flujo': 'ver flujo' in permisos,\n 'asignar_actividades_proyecto': 'asignar actividades proyecto' in permisos\n }\n return render_to_response('proyectos/admin_actividades_proyecto.html', ctx, context_instance=RequestContext(request))", "def listaFasesDeProyecto(self, proyecto):\n thead = self.estiloHoja['Heading5']\n thead.alignment=TA_CENTER\n tbody = self.estiloHoja[\"BodyText\"]\n tbody.alignment=TA_LEFT\n cabecera = [Paragraph('Nombre',thead),Paragraph('Orden',thead),Paragraph('Estado',thead),Paragraph('Tipo de Item',thead)]\n contenido = [cabecera]\n lista = MgrProyecto().fasesDeProyecto(proyecto.nombre)\n tabla = Table(contenido)\n for f in lista:\n tipoDeItem = MgrTipoDeItem().filtrarXId(f.tipoDeItemId)\n contenido.append([Paragraph(f.nombre,tbody), Paragraph(str(f.orden),tbody), Paragraph(f.estado,tbody), Paragraph(tipoDeItem.nombre,tbody)])\n tabla = Table(contenido) \n tabla.setStyle(self.tablaStyle)\n return tabla", "def generarReporteFase(self, proyecto):\n story = []\n contenido=[]\n #\n parrafo = self.titulo()\n story.append(parrafo) \n # \n parrafo2 = self.encabezado('Fases del Proyecto ' + proyecto.nombre )\n story.append(parrafo2)\n \n story.append(Spacer(0, 20))\n #\n lista = MgrProyecto().fasesDeProyecto(proyecto.nombre)\n for f in lista:\n parrafo2 = self.encabezado('Datos de Fase')\n story.append(parrafo2)\n contenido = self.datosFase(f)\n tabla = Table(contenido)\n tabla.setStyle(self.tablaStyle)\n story.append(tabla)\n story.append(Spacer(0, 20)) \n parrafo2 = self.encabezado('Lista de Item de Fase')\n story.append(parrafo2)\n tablaF = self.listaDeItem(f)\n story.append(tablaF) \n parrafo2 = self.encabezado('Lista de Linea Base de Fase')\n story.append(parrafo2)\n tablaLB = self.listaDeLB(f)\n story.append(tablaLB) \n story.append(Spacer(0, 40))\n contenido = []\n \n return story", "def proyectoIniciado(self, 
nombre):\n proyecto = self.filtrar(nombre)\n for fase in proyecto.listafases:\n if fase.estado != \"Desarrollo\":\n return False\n return True", "def cargarPedido(self):", "def prom_gasto():\n promedio = (sum(costos) + sum(costos_carameleria))/len(clientes)\n return promedio", "def ordenarFase(self, proyecto, fase):\n for i in proyecto.listafases: \n f = Fase.query.filter(Fase.idFase == i.idFase).first()\n if f.orden > fase.orden and fase.orden != f.orden:\n f.orden = f.orden - 1\n db.session.commit()\n return \":ordeno las fases:\"", "def schlafen(self):", "def profit(nbConso, prix,coutMenu,coutEntretien):", "def profit(Qte,pv):\n return Qte*(pv-coutMenu) - coutEntretien", "def ejecutar(self):\r\n eliminado = None\r\n #pedirle al turno ordenar los pokemon por velocidad\r\n primero = self.prioridad()\r\n segundo = self.__pokemon_jugador if (primero == self.__pokemon_rival) else self.__pokemon_rival\r\n #ejecutar el primer movimiento\r\n datos_ataque = primero.atacar(self.__get_diccionario_ataques()[primero])\r\n if datos_ataque != None:\r\n damage = self.calculo_damage(primero, segundo, datos_ataque)\r\n segundo.set_vida(segundo.get_vida() - damage[0])\r\n print(damage[1], end=' -> ') #Esto es la cadena que indica si es eficaz\r\n #comprobaciones\r\n if segundo.get_vida() < 0:\r\n segundo.set_vida(0)\r\n print(f'{segundo.get_nombre_completo()} | PS:{segundo.get_vida()}\\n')\r\n if segundo.get_vida() == 0:\r\n eliminado = segundo\r\n print(segundo.get_nombre_completo() + ' se ha debilitado')\r\n print()\r\n return eliminado\r\n if self.__get_diccionario_ataques()[self.__pokemon_jugador] != 'cambio':\r\n #ejecutar el otro movimiento\r\n datos_ataque = segundo.atacar(self.__get_diccionario_ataques()[segundo])\r\n if datos_ataque != None:\r\n damage = self.calculo_damage(segundo, primero, datos_ataque)\r\n primero.set_vida(primero.get_vida() - damage[0])\r\n print(damage[1], end=' -> ') #Esto es la cadena que indica si es eficaz\r\n #comprobaciones\r\n if primero.get_vida() < 0:\r\n primero.set_vida(0)\r\n print(f'{primero.get_nombre_completo()} | PS:{primero.get_vida()}\\n')\r\n if primero.get_vida() == 0:\r\n eliminado = primero\r\n print(primero.get_nombre_completo() + ' se ha debilitado')\r\n print('_________________________________________________________________________ \\n')\r\n print()\r\n return eliminado\r\n print('_________________________________________________________________________ \\n')\r\n return eliminado", "def revisar(self):\n cambios = 0\n # verifico valores posibles únicos en el grupo\n for celda1 in self.celdas:\n if celda1.vacia():\n for valor in celda1.posible:\n cantidad = self.incluye([valor])\n if cantidad == 1:\n # mensaje(celda1,valor,\"Asumiendo por \" + self.tipo)\n celda1.setvalor(valor)\n cambios += 1\n\n # verifico combinaciones de N valores que se repiten en N celdas\n for celda in self.celdas:\n # recorro las combinaciones de distintas longitudes a partir de 2\n for largo in range(1, len(celda.posible)):\n for comb in combinations(celda.posible, largo):\n cantidad = self.incluye(comb)\n # si la cantidad es exactamente la longitud\n if cantidad == largo and largo == len(comb):\n cantidad_unitaria = self.incluye_unit(comb)\n # si no hay celdas que cumplan\n if cantidad_unitaria == 0:\n cambios += self.asignar(comb)\n return cambios", "def lista_puntuacion(juegos_favoritos):\n ##https://serpapi.com/images-results\n from django.core.exceptions import MultipleObjectsReturned\n aleatorios = []\n for game in juegos_favoritos:\n juego = Juego.objects.get(titulo = 
game.juego)\n try:\n imagen = Imagen.objects.get(juego = game.juego)\n except MultipleObjectsReturned:\n print(\"Excepcion generada:\",MultipleObjectsReturned) \n imagen = Imagen.objects.get(juego = game.juego).first()[0]\n except Exception as e:\n print(\"Excepcion generada en inicio_jugador:\",e)\n imagen = Imagen.objects.get(juego = 1)\n aleatorios.append((juego,imagen))\n return aleatorios", "def continente_pais ( self , pais ) :\n\n for i1iIIIiI1I in self . continentes :\n if 70 - 70: Oo0Ooo % Oo0Ooo . IiII % OoO0O00 * o0oOOo0O0Ooo % oO0o\n if pais in self.continentes[i1iIIIiI1I] :\n return i1iIIIiI1I\n if 23 - 23: i11iIiiIii + I1IiiI" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retorna el numero de fases del proyecto
def nroDeFaseDeProyecto(self, nombre): proyecto = self.filtrar(nombre) cont = 0 for i in proyecto.listafases: if i != None: cont = cont + 1 return cont
[ "def count (self):\n total = 1\n for dep in self.deps:\n total += dep.count()\n return total", "def num_stages(self) -> int:\n pass", "def fasesDeProyecto(self, nombre):\n proyecto = self.filtrar(nombre)\n return proyecto.listafases", "def get_county_number():\n return len(get_counties())", "def num_fpgas(self):\n return self.__num_fpgas", "def count(self) -> int:\n return self.__solution_count", "def get_number_of_workers():", "def _get_count(self) -> \"size_t\" :\n return _core.DataProjects__get_count(self)", "def coxeter_number(self):\n return (self.number_of_reflection_hyperplanes()\n + self.number_of_reflections()) // self.rank()", "def get_number_of_phases(self):\n return 1", "def number_of_reflections_of_full_support(self):\n n = self.rank()\n h = self.coxeter_number()\n l = self.cardinality()\n codegrees = self.codegrees()[:-1]\n return (n * h * prod(codegrees)) // l", "def n_elements(self, f: Feature) -> int:\n return self.features[id(f)].n_elements()", "def games(self) -> int:\n return self.wins + self.losses + self.ties", "def getProfitableCount(self):\n\t\treturn len(self.__profits)", "def n_reference(self):\n n = 0\n u = self.position.sequence.upper()\n for a in self.alleles:\n if a.sequence == u:\n n += 1\n return n", "def get_solution_count():\n return len(DataHelper.solution_ids)", "def _get_count(self) -> \"size_t\" :\n return _core.FavoriteMaterials__get_count(self)", "def number_of_reflection_hyperplanes(self):\n from sage.rings.all import ZZ\n return ZZ.sum(codeg+1 for codeg in self.codegrees())", "def num_withdrawals(goal):\n transactions = GoalTransaction.objects.filter(goal=goal)\n\n if not transactions:\n return 0\n\n withdrawals = 0\n\n for t in transactions:\n if t.is_withdraw:\n withdrawals += 1\n\n return withdrawals" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ordena las fase de un proyecto
def ordenarFase(self, proyecto, fase): for i in proyecto.listafases: f = Fase.query.filter(Fase.idFase == i.idFase).first() if f.orden > fase.orden and fase.orden != f.orden: f.orden = f.orden - 1 db.session.commit() return ":ordeno las fases:"
[ "def createFase():\n # proyecto 1\n p = MgrProyecto().filtrar(\"proyecto1\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem1\")\n f = Fase(nombre=\"proyecto1-fase1\", descripcion=\"nueva fase\", orden=1, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)\n \n p = MgrProyecto().filtrar(\"proyecto1\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem1\")\n f = Fase(nombre=\"proyecto1-fase2\", descripcion=\"nueva fase\", orden=2, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)\n \n p = MgrProyecto().filtrar(\"proyecto1\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem2\")\n f = Fase(nombre=\"proyecto1-fase3\", descripcion=\"nueva fase\", orden=3, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)\n \n p = MgrProyecto().filtrar(\"proyecto1\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem3\")\n f = Fase(nombre=\"proyecto1-fase4\", descripcion=\"nueva fase\", orden=4, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)\n \n # proyecto 2\n p = MgrProyecto().filtrar(\"proyecto2\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem3\")\n f = Fase(nombre=\"proyecto2-fase1\", descripcion=\"nueva fase\", orden=1, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)\n \n p = MgrProyecto().filtrar(\"proyecto2\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem2\")\n f = Fase(nombre=\"proyecto2-fase2\", descripcion=\"nueva fase\", orden=2, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)\n \n p = MgrProyecto().filtrar(\"proyecto2\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem4\")\n f = Fase(nombre=\"proyecto2-fase3\", descripcion=\"nueva fase\", orden=3, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f) \n \n p = MgrProyecto().filtrar(\"proyecto2\")\n t = MgrTipoDeItem().filtrar(\"TipoDeItem2\")\n f = Fase(nombre=\"proyecto2-fase4\", descripcion=\"nueva fase\", orden=4, proyectoId= p.idProyecto, tipoDeItemId=t.idTipoDeItem)\n MgrFase().guardar(f)", "def nroDeFaseDeProyecto(self, nombre):\n proyecto = self.filtrar(nombre)\n cont = 0\n for i in proyecto.listafases:\n if i != None:\n cont = cont + 1\n return cont", "def fasesDeProyecto(self, nombre):\n proyecto = self.filtrar(nombre)\n return proyecto.listafases", "def listaFasesDeProyecto(self, proyecto):\n thead = self.estiloHoja['Heading5']\n thead.alignment=TA_CENTER\n tbody = self.estiloHoja[\"BodyText\"]\n tbody.alignment=TA_LEFT\n cabecera = [Paragraph('Nombre',thead),Paragraph('Orden',thead),Paragraph('Estado',thead),Paragraph('Tipo de Item',thead)]\n contenido = [cabecera]\n lista = MgrProyecto().fasesDeProyecto(proyecto.nombre)\n tabla = Table(contenido)\n for f in lista:\n tipoDeItem = MgrTipoDeItem().filtrarXId(f.tipoDeItemId)\n contenido.append([Paragraph(f.nombre,tbody), Paragraph(str(f.orden),tbody), Paragraph(f.estado,tbody), Paragraph(tipoDeItem.nombre,tbody)])\n tabla = Table(contenido) \n tabla.setStyle(self.tablaStyle)\n return tabla", "def ordenarAsc(self):\n self.cartas.sort(key = lambda x: x.rango )", "def ver_actividades_proyecto(request, flujo_id, proyecto_id):\n proyecto = get_object_or_404(Proyecto, id=proyecto_id)\n flujo = get_object_or_404(Flujo, id=flujo_id)\n user = User.objects.get(username=request.user.username)\n userRolProy = UsuarioRolProyecto.objects.filter(proyecto=proyecto_id)\n roles = UsuarioRolProyecto.objects.filter(usuario = user, proyecto = proyecto).only('rol')\n permisos_obj = []\n for i in roles:\n permisos_obj.extend(i.rol.permisos.all())\n permisos 
= []\n for i in permisos_obj:\n permisos.append(i.nombre)\n fluActProy = FlujoActividadProyecto.objects.filter(flujo = flujo_id, proyecto = proyecto_id).order_by('orden')\n actList = {}\n ultActividad = 0\n for rec in fluActProy:\n if not actList.has_key(rec.flujo.id):\n actList[rec.flujo.id] = {}\n if not actList[rec.flujo.id].has_key(int(rec.orden)):\n actList[rec.flujo.id][int(rec.orden)] = {}\n if not actList[rec.flujo.id][int(rec.orden)].has_key(rec.actividad.id):\n actList[rec.flujo.id][int(rec.orden)][rec.actividad.id] = []\n act = Actividad.objects.get(nombre = rec.actividad)\n actList[rec.flujo.id][int(rec.orden)][rec.actividad.id].append(act.nombre)\n actList[rec.flujo.id][int(rec.orden)][rec.actividad.id].append(act.descripcion)\n ultActividad = int(rec.orden)\n if actList:\n actDict = actList[int(flujo_id)]\n else:\n actDict = None\n lista = User.objects.all().order_by(\"id\")\n proyPend = False\n if proyecto.estado == 1:\n proyPend = True\n ctx = {'flujo':flujo,\n 'proyecto':proyecto,\n 'actividades':actDict,\n 'proyPend':proyPend,\n 'ultActividad':ultActividad,\n 'ver_flujo': 'ver flujo' in permisos,\n 'asignar_actividades_proyecto': 'asignar actividades proyecto' in permisos\n }\n return render_to_response('proyectos/admin_actividades_proyecto.html', ctx, context_instance=RequestContext(request))", "def tieneAntecesor(self):\n\t\tself.item = self.session.query(Item).filter_by(coditem=10).first()\n\t\titemFase = self.item.fase.items \n\t\tfaseItemAnt = int(self.item.fase.codfase) - 1\n\t\tantecesores = self.session.query(Relacion).filter_by(coditemfin=10).filter_by(tipo='antecesor-sucesor').all()\n\t\t\n\t\tfaseAnt = self.session.query(Fase).filter_by(codfase=faseItemAnt).first()\n\t\titemFaseAnt = self.session.query(Item).filter_by(fase=faseAnt).all()\n\t\t\n\t\titemFaseAnterior = list()\n\t\tpila = list()\n\t\t \n\t\tfor h in itemFaseAnt:\n\t\t itemFaseAnterior.append(h.coditem)\n\t\t \n\t\tfor j in antecesores:\n\t\t if j.coditeminicio in itemFaseAnterior:\n\t\t valor = 1\n\t\t break \n\t\t\n\t\tpadres = self.session.query(Relacion).filter_by(coditemfin=10).filter_by(tipo='padre-hijo').all()\n\t\tfor i in padres:\n\t\t pila.append(i.coditeminicio) \n\t\t\n\t\tvalor = 0\n\t\t \n\t\twhile(pila and valor!=1):\n\t\t x = pila.pop()\n\t\t antecesores = self.session.query(Relacion).filter_by(coditemfin=x).filter_by(tipo='antecesor-sucesor').all()\n\t\t cantidad=0\n\t\t for m in antecesores:\n\t\t cantidad = cantidad + 1\n\t\t m.coditeminicio\n\t\t \n\t\t if cantidad == 0:\n\t\t antecesores = self.session.query(Relacion).filter_by(coditemfin=x).filter_by(tipo='padre-hijo').all()\n\t\t for i in antecesores: \n\t\t pila.append(i.coditeminicio)\n\t\t else:\n\t\t for j in antecesores:\n\t\t if j.coditeminicio in itemFaseAnterior:\n\t\t valor = 1\n\t\t break\n\t\t \n\t\tassert valor != 1, 'El item no tiene antecesores'", "def datosProyecto(self, proyecto):\n thead = self.estiloHoja['Heading5']\n thead.alignment=TA_CENTER\n tbody = self.estiloHoja[\"BodyText\"]\n tbody.alignment=TA_LEFT\n contenido=[]\n contenido.append([Paragraph('Nombre de Proyecto',thead),Paragraph(proyecto.nombre,tbody)])\n tabla = Table(contenido)\n lider = MgrProyecto().getLider(proyecto.nombre)\n contenido.append([Paragraph('Lider de Proyecto',thead),Paragraph(lider,tbody)])\n contenido.append([Paragraph('Estado de Proyecto',thead),Paragraph(proyecto.estado,tbody)])\n contenido.append([Paragraph('Presupuesto de Proyecto',thead),Paragraph(str(proyecto.presupuesto),tbody)])\n contenido.append([Paragraph('Fecha de 
Creacion de Proyecto',thead),Paragraph(str(proyecto.fechaDeCreacion),tbody)])\n contenido.append([Paragraph('Descripcion del Proyecto',thead),Paragraph(proyecto.descripcion,tbody)])\n comite = MgrComite().search(proyecto.nombre)\n contenido.append([Paragraph('Nombre de Comite del Proyecto',thead),Paragraph(comite.nombre,tbody)])\n contenido.append([Paragraph('Cantidad de Miembros',thead),Paragraph(str(comite.cantMiembro),tbody)])\n tabla = Table(contenido)\n tabla.setStyle(self.tablaStyle)\n return tabla", "def createProyecto():\n # crea un proyecto\n p = Proyecto(nombre=\"proyecto1\", descripcion=\"sistema 1\", presupuesto=10000)\n MgrProyecto().guardar(p)\n per = MgrPermiso().filtrarXModulo(\"ModuloGestion\")\n r = Rol(nombre=\"LiderDeProyecto\", descripcion=\"rol de lider\", ambito= p.nombre, permisos=per)\n MgrRol().guardar(r)\n MgrProyecto().asignarLider(proyecto = p , rol = r, nameLider = \"lory\")\n p = MgrProyecto().filtrar(\"proyecto1\")\n c = Comite(nombre=\"comite-proyecto1\", descripcion=\"comite de cambio\", cantMiembro=3, proyectoId=p.idProyecto)\n MgrComite().guardar(c)\n u = MgrProyecto().getUserLider(p.idProyecto)\n MgrComite().asignarUsuario(p,u)\n\n p = Proyecto(nombre=\"proyecto2\", descripcion=\"sistema 2\", presupuesto=20000)\n MgrProyecto().guardar(p)\n per = MgrPermiso().filtrarXModulo(\"ModuloGestion\")\n r = Rol(nombre=\"LiderDeProyecto\", descripcion=\"rol de lider\", ambito= p.nombre, permisos=per)\n MgrRol().guardar(r)\n MgrProyecto().asignarLider(proyecto = p , rol = r, nameLider = \"vavi\")\n p = MgrProyecto().filtrar(\"proyecto2\")\n c = Comite(nombre=\"comite-proyecto2\", descripcion=\"comite de cambio\", cantMiembro=3, proyectoId=p.idProyecto)\n MgrComite().guardar(c)\n u = MgrProyecto().getUserLider(p.idProyecto)\n MgrComite().asignarUsuario(p,u)\n\n p = Proyecto(nombre=\"proyecto3\", descripcion=\"sistema 3\", presupuesto=30000)\n MgrProyecto().guardar(p)\n per = MgrPermiso().filtrarXModulo(\"ModuloGestion\")\n r = Rol(nombre=\"LiderDeProyecto\", descripcion=\"rol de lider\", ambito= p.nombre, permisos=per)\n MgrRol().guardar(r)\n MgrProyecto().asignarLider(proyecto = p , rol = r, nameLider = \"guille\")\n p = MgrProyecto().filtrar(\"proyecto3\")\n c = Comite(nombre=\"comite-proyecto3\", descripcion=\"comite de cambio\", cantMiembro=3, proyectoId=p.idProyecto)\n MgrComite().guardar(c)\n u = MgrProyecto().getUserLider(p.idProyecto)\n MgrComite().asignarUsuario(p,u)\n \n p = Proyecto(nombre=\"proyecto4\", descripcion=\"sistema 4\", presupuesto=40000)\n MgrProyecto().guardar(p)\n per = MgrPermiso().filtrarXModulo(\"ModuloGestion\")\n r = Rol(nombre=\"LiderDeProyecto\", descripcion=\"rol de lider\", ambito= p.nombre, permisos=per)\n MgrRol().guardar(r)\n MgrProyecto().asignarLider(proyecto = p , rol = r, nameLider = \"stfy\")\n p = MgrProyecto().filtrar(\"proyecto3\")\n c = Comite(nombre=\"comite-proyecto3\", descripcion=\"comite de cambio\", cantMiembro=2, proyectoId=p.idProyecto)\n MgrComite().guardar(c)\n u = MgrProyecto().getUserLider(p.idProyecto)\n MgrComite().asignarUsuario(p,u)\n\n print \":cargo proyectos:\"", "def Inferencias(self):\r\n self.GeneroMasculinoDisfrute = 0\r\n self.GeneroFemeninoDisfrute = 0\r\n self.GeneroMasculinoFacilidad = 0\r\n self.GeneroFemeninoFacilidad = 0\r\n self.EdadValor = 0\r\n self.NumeroUsuarios = 0\r\n self.ValorGeneroFacilidad = \"\"\r\n self.ValorGeneroDisfrute = \"\"\r\n self.valoredad = 0\r\n self.EdadMayor = 0\r\n for i in onto.Usuario.instances(): \r\n self.NumeroUsuarios = 1 + self.NumeroUsuarios\r\n for prop in 
i.get_properties():\r\n for value in prop[i]:\r\n if prop.python_name == \"Facilidad_percibida_de_uso\" and value>59:\r\n for prop in i.get_properties():\r\n for value in prop[i]:\r\n if prop.python_name == \"Genero\":\r\n if str(value) == \"Masculino\":\r\n self.GeneroMasculinoFacilidad = 1 + self.GeneroMasculinoFacilidad\r\n elif str(value) == \"Femenino\":\r\n self.GeneroFemeninoFacilidad = 1 + self.GeneroFemeninoFacilidad \r\n if self.GeneroMasculinoFacilidad >= self.GeneroFemeninoFacilidad:\r\n self.ValorGeneroFacilidad = \"Masculino\"\r\n if self.GeneroMasculinoFacilidad <= self.GeneroFemeninoFacilidad:\r\n self.ValorGeneroFacilidad = \"Femenino\"\r\n if prop.python_name == \"Disfrute_percibido\" and value>59:\r\n for prop in i.get_properties():\r\n for value in prop[i]:\r\n if prop.python_name == \"Genero\":\r\n if str(value) == \"Masculino\":\r\n self.GeneroMasculinoDisfrute = 1 + self.GeneroMasculinoDisfrute\r\n elif str(value) == \"Femenino\":\r\n self.GeneroFemeninoDisfrute = 1 + self.GeneroFemeninoDisfrute\r\n if self.GeneroMasculinoDisfrute >= self.GeneroFemeninoDisfrute:\r\n self.ValorGeneroDisfrute= \"Masculino\"\r\n if self.GeneroMasculinoDisfrute <= self.GeneroFemeninoDisfrute:\r\n self.ValorGeneroDisfrute = \"Femenino\"\r\n if prop.python_name == \"Edad\":\r\n self.EdadValor = int(value) + self.EdadValor\r\n self.valoredad = int(value)\r\n if self.EdadMayor < self.valoredad:\r\n self.EdadMayor = self.valoredad\r\n\r\n if self.NumeroUsuarios==0:\r\n self.NumeroUsuarios=1\r\n PromedioEdad = (self.EdadValor / self.NumeroUsuarios)\r\n MayorEdad.set(self.EdadMayor) \r\n resultadopromedio.set(round(PromedioEdad))\r\n resultadodisfrute.set(self.ValorGeneroDisfrute)\r\n resultadofacilidad.set(self.ValorGeneroFacilidad)", "def schlafen(self):", "def juego(): \n tableros(tablero)\n ubicar_todo()\n #print(lista_final)\n tiros()", "def generarReporteFase(self, proyecto):\n story = []\n contenido=[]\n #\n parrafo = self.titulo()\n story.append(parrafo) \n # \n parrafo2 = self.encabezado('Fases del Proyecto ' + proyecto.nombre )\n story.append(parrafo2)\n \n story.append(Spacer(0, 20))\n #\n lista = MgrProyecto().fasesDeProyecto(proyecto.nombre)\n for f in lista:\n parrafo2 = self.encabezado('Datos de Fase')\n story.append(parrafo2)\n contenido = self.datosFase(f)\n tabla = Table(contenido)\n tabla.setStyle(self.tablaStyle)\n story.append(tabla)\n story.append(Spacer(0, 20)) \n parrafo2 = self.encabezado('Lista de Item de Fase')\n story.append(parrafo2)\n tablaF = self.listaDeItem(f)\n story.append(tablaF) \n parrafo2 = self.encabezado('Lista de Linea Base de Fase')\n story.append(parrafo2)\n tablaLB = self.listaDeLB(f)\n story.append(tablaLB) \n story.append(Spacer(0, 40))\n contenido = []\n \n return story", "def actualizarfran(self,):\r\n self.objeto_Ambc.actualizar(self.producto,self.talle,self.descripcion,self.marca,self.tree)", "def comprueba_fase_acabada(vol_ejecutado, de_tramo_0, estado_real, fase, hora, cont_t_arranque):\r\n # Si el volumen ejecutado supera o iguala al volumen de la subfase\r\n if vol_ejecutado.iloc[fase, :].sum() >= de_tramo_0.loc[de_tramo_0.index[fase], 'vol_subfase']:\r\n\r\n logging.info('Hora: ' + str(hora) + ' Fase: ' + str(\r\n fase) + ' Nivel IV: Fase Acabada')\r\n\r\n # Cuando acaba la subfase se supone que me llevo la maquinaria de la obra.\r\n # Se pone el contador a 0 de tiempo de arranque\r\n cont_t_arranque.loc[cont_t_arranque.index[fase], 't_arranque'] = 0\r\n\r\n return (estado_real)", "def profit(nbConso, prix,coutMenu,coutEntretien):", "def 
saludo():\r\n\tprint (\"Hola! Bienvenido al juego Luces Afuera.\")\r\n\tprint (\"El objetivo es muy simple, apagar todas las luces.\") \r\n\tprint (\"Las luces prendidas son los puntos o y las apagadas los puntos ·\")\r\n\tprint (\"Cuando presionás una luz, escribiendo su posicion, como por ejemplo D4 o A3, ésta se prende o apaga dependiendo de su estado inicial.\")\r\n\tprint (\"Pero OJO! Cada vez que presionás una luz, sus vecinas tambien se presionarán.\")\r\n\tprint (\"En todo momento podes escribir RESET y volver al tablero original, pero esto te hace perder puntos.\")\r\n\tprint ()", "def ordenar_fixture(lista_puntajes):\n\tlista_ordenada = sorted(lista_puntajes, key=lambda x: (x['numero_partido']))\n\treturn lista_ordenada", "def proyectoIniciado(self, nombre):\n proyecto = self.filtrar(nombre)\n for fase in proyecto.listafases:\n if fase.estado != \"Desarrollo\":\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retorna True si el proyecto ya existe
def existe(self, proyecto): p = Proyecto.query.filter(Proyecto.nombre == proyecto.nombre).first() if p != None: return True else: return False
[ "def does_project_exist(slug):\n return isdir(project_dir(slug))", "def has_current_project(self):\n return os.path.exists(CURRENT_PROJECT)", "def _validate_project_exists(self):\n odooclient = odoo_client.get_odoo_client()\n try:\n search = [['tenant_id', '=', self.project_id]]\n project = odooclient.projects.list(search)[0]\n self.odoo_project = project\n self.odoo_project_id = project.id\n self.odoo_project_name = project.name\n self.add_note('Odoo project %s (%s) exists.'\n % (self.odoo_project_name, self.odoo_project_id))\n return True\n except IndexError:\n self.add_note('Project %s does not exist in odoo'\n % self.project_id)\n return False", "def repo_exists(cls, name):\n return Objs.objects.filter(repo=name).exists()", "def exists(self) -> bool:\n L.info(f\"checking if {self.name} pipeline exists\")\n return requests.get(f\"{API}/{self.slug}\").status_code == 200", "def proyectoIniciado(self, nombre):\n proyecto = self.filtrar(nombre)\n for fase in proyecto.listafases:\n if fase.estado != \"Desarrollo\":\n return False\n return True", "def regressor_exists(self):\n\n regressor_dir = os.path.join(self.persistencedir,\n PERSIST_FILENAME)\n return os.path.isfile(regressor_dir)", "def check(self):\n slab_logger.log(15, 'Checking for repo %s' % self.get_reponame())\n if os.path.exists(\"./{}\".format(self.get_reponame())):\n slab_logger.log(25, \"repo for %s exist as %s\"\n % (self.name, self.get_reponame()))\n return True\n return False", "def distro_exists(distro):\n return distro in os.listdir('_package')", "def target_exist():\n if os.path.exists('Targets'):\n return True\n else:\n return False", "def _check_project_exists(course_code: str, project_id: str):\n check_course_exists(course_code)\n\n if not project_helper.project_exists(course_code, project_id):\n click.echo(f'The project with id \"{project_id} does not exist in \"{course_code}\".')\n sys.exit(1)", "def exists(self):\n return os.path.exists(self.localpath())", "def groc_dir_exists(self):\n return os.path.exists(self.groc_dir)", "def exists(_env):\n return True", "def if_exist(self):\n if not os.path.exists(self.video_path):\n logging.info(msg=f\"{self.video_path} doesn't exist!\")\n return False\n return True", "def proyectoFinalizado(self, nombre):\n proyecto = self.filtrar(nombre)\n for fase in proyecto.listafases:\n if fase.estado != \"Finalizado\":\n return False\n return True", "def savefile_exists(self):\n\n return os.path.isfile(str(self.savefile) + '.qu')", "def check_project_exists(course_code: str, project_id: str):\n if not project_helper.project_exists(course_code, project_id):\n click.echo(f'The project with id \"{project_id}\" does not exist'\n f'for the course \"{course_code}\".')", "def site_exist(self):\n return self.site_count != 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
make a grid definition from the hindcasts, for e.g. regridding of the forecasts if they are on a inconsistent grid .. also adds a `n_members` global attribute to the grid, to be used to select the same number of members in the ensemble in the forecasts
def make_hindcasts_grid_def(provider='CDS', GCM='ECMWF', var_name='T2M', rpath='gdata', year=2016, month=12): import pathlib import xarray as xr if rpath == 'local': root_dir = pathlib.Path.home() / 'research' / 'Smart_Ideas' / 'data' elif rpath == 'gdata': root_dir = pathlib.Path('/media/nicolasf/GDATA/END19101/Working/data/') elif rpath == 'network': root_dir = pathlib.Path.home() / 'drives' / 'auck_projects' / 'END19101' / 'Working' / 'data' if var_name is None: var_name = 'T2M' if year is None: year = 2016 if month is None: month = 12 dpath = pathlib.Path(root_dir / 'GCMs' / 'processed' / 'hindcasts' / provider / GCM / var_name) dset = xr.open_dataset(dpath / f"{GCM}_{var_name}_monthly_anomalies_{year}_{month}.nc") if 'member' in dset.dims: n_members = dset.dims['member'] grid = dset[['lat','lon']] if 'surface' in grid.coords: grid = grid.drop('surface') if 'month' in grid.coords: grid = grid.drop('month') grid.attrs['n_members'] = n_members grid.attrs['provider'] = provider grid.attrs['GCM'] = GCM return grid
[ "def create_param_grid():\n # pop_size_list = np.arange(100,1300,300).tolist()\n gens_list = np.arange(100,1300,300).tolist()\n select_list = ['tournament', 'rank']\n crossover_list = ['cycle_co', 'pmx_co']\n # mutate_list = ['swap_mutation']\n co_p_list = np.arange(0.1,1.1,0.2).round(2).tolist()\n mu_p_list = np.arange(0.1,1.1,0.2).round(2).tolist()\n elitism = [True, False]\n\n param_set = {\n # 'popsize': pop_size_list,\n 'gens' : gens_list,\n 'select' : select_list,\n 'crossover' : crossover_list,\n # 'mutate' : mutate_list,\n 'co_p' : co_p_list,\n 'mu_p' : mu_p_list,\n 'elitism' : elitism\n }\n\n\n param_grid = list(\n ParameterGrid(param_set)\n )\n\n return param_grid", "def _gen_grid(self):\n self.grid = np.zeros((self.tot_obj, 4))\n self.oc_grid = np.zeros((self.tot_obj, 4))\n for obj_id in range(self.tot_obj):\n x, y = self._gen_pos()\n in_view = 1.0 # Fully visible grid. Mask it later during observation gneration\n if self.obj_types[obj_id] == \"c\":\n obj_type = 1.0\n elif self.obj_types[obj_id] == \"d\":\n obj_type = 2.0\n elif self.obj_types[obj_id] == \"h\":\n obj_type = 3.0\n else:\n obj_type = -2.0\n self.grid[obj_id] = np.asarray([x, y, in_view, obj_type])", "def make_grid_graph(self):\r\n self.grid_graph = nx.grid_2d_graph(self.size_x + 1, self.size_y + 1)", "def create_grid(grid):\r\n grid.append([0]*4)\r\n grid.append([0]*4)\r\n grid.append([0]*4)\r\n grid.append([0]*4)", "def build_training_grid(**names_list_params):\n\n from itertools import product\n\n param_names = []\n individual_grids = []\n for param_name, values_list in names_list_params.items():\n assert isinstance(values_list, list), f\"Value for key {param_name} is not a list\"\n param_names.append(param_name)\n individual_grids.append(values_list)\n\n grid_list = []\n for grid in product(*individual_grids):\n grid_list.append({param_names[i]: grid[i] for i in range(len(param_names))})\n\n print(f\"Grid has {len(grid_list)} elements\")\n zfill_order = np.ceil(np.log10(len(grid_list))).astype(int)\n grid_dict = {f\"job_{str(i).zfill(zfill_order)}\": grid_element for i, grid_element in enumerate(grid_list)}\n\n return grid_dict", "def createGrid(xMax, yMax, cold, ambient, hot):\n grid = np.zeros((xMax*yMax), dtype='f').reshape(yMax,xMax)\n grid[:] = ambient\n #grid[0] = 0 # Top Row\n #grid[-1] = 0 # Bottom Row\n #grid[:,0] = 0 # Left Side\n #grid[:,-1] = 0 # Right Side\n grid[-2,3:9] = hot\n grid[2:6,1] = cold\n return grid", "def custom_grid():\n\n return np.arange(1, 82, dtype=np.int32).reshape((9, 9))", "def make_ensemble_array(self, grid_list):\r\n print(\"Making the common grids\")\r\n common_grids = Grid.common_grid(grid_list)\r\n print(\"Stared making arrays\")\r\n as_arrays = [self.array_from_grid(cg) for cg in common_grids]\r\n\r\n self.ensemble_array = np.stack(as_arrays, axis=-1)\r\n print(\"GridEnsemble complete\")\r\n self.dimensions = np.array(common_grids[0].bounding_box)\r\n self.shape = common_grids[0].nsteps", "def create_grid(grid):\r\n for t in range(4):\r\n grid.append([0,0,0,0])", "def create_grid(data, drone_altitude, safety_distance):#, resolution):\r\n\r\n # minimum and maximum north coordinates\r\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\r\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\r\n\r\n # minimum and maximum east coordinates\r\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\r\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\r\n\r\n # given the minimum and maximum coordinates we can\r\n # calculate the size of the grid.\r\n north_size = 
int(np.ceil((north_max - north_min)))#/resolution))\r\n east_size = int(np.ceil((east_max - east_min)))#/resolution))\r\n\r\n # Initialize an empty grid\r\n grid = np.zeros((north_size, east_size))\r\n\r\n # Populate the grid with obstacles\r\n for i in range(data.shape[0]):\r\n north, east, alt, d_north, d_east, d_alt = data[i, :]\r\n if alt + d_alt + safety_distance > drone_altitude:\r\n obstacle = [\r\n int(np.clip(north - d_north - safety_distance - north_min, 0, north_size-1)),\r\n int(np.clip(north + d_north + safety_distance - north_min, 0, north_size-1)),\r\n int(np.clip(east - d_east - safety_distance - east_min, 0, east_size-1)),\r\n int(np.clip(east + d_east + safety_distance - east_min, 0, east_size-1)),\r\n ]\r\n grid[obstacle[0]:obstacle[1]+1, obstacle[2]:obstacle[3]+1] = 1\r\n\r\n return grid, int(north_min), int(east_min)", "def gen_grid(self, grid_size):\n x = torch.linspace(-0.2, 0.2, grid_size, dtype=torch.float32)\n # grid_sizexgrid_size\n x, y = torch.meshgrid(x, x)\n # 2xgrid_sizexgrid_size\n grid = torch.stack([x, y], dim=0).view(\n [2, grid_size * grid_size]) # [2, grid_size, grid_size] -> [2, grid_size*grid_size]\n return grid", "def _nd_grid(self) -> pp.Grid:\n return self.gb.grids_of_dimension(self.Nd)[0]", "def gen_grid(self, grid_size):\n x = torch.linspace(-1.0, 1.0, grid_size, dtype=torch.float32)\n # grid_sizexgrid_size\n x, y = torch.meshgrid(x, x)\n # 2xgrid_sizexgrid_size\n grid = torch.stack([x, y], dim=0).view(\n [2, grid_size * grid_size]) # [2, grid_size, grid_size] -> [2, grid_size*grid_size]\n return grid", "def build_init_nx_grid(nodes, edges):\n\n G = nx.Graph() # initialize empty graph\n\n # add all nodes\n node_list = [node[1] for node in nodes.keys() if node[0] == 'd']\n add_nodes = [(cur_node, {'demand': nodes[('d', cur_node)],'gen_cap':nodes[('c', cur_node)], 'generated':0, 'un_sup_cost':0, 'gen_cost':0, 'original_demand': nodes[('d', cur_node)]}) for cur_node in node_list]\n G.add_nodes_from(add_nodes)\n\n # add all edges\n edge_list = [(min(edge[1], edge[2]), max(edge[1], edge[2])) for edge in edges if edge[0] == 'c']\n add_edges = [(cur_edge[0], cur_edge[1], {'capacity': edges[('c',) + cur_edge], 'susceptance': edges[('x',) + cur_edge]}) for cur_edge in edge_list if (edges[('c',) + cur_edge] > 0)]\n\n G.add_edges_from(add_edges)\n\n return(G)", "def makeGrids(self):\n # make sure connectivity was created\n self.mesh.init()\n # vertices\n cl = zeros((self.mesh.size(0), 3), dtype='d')\n cl[:, :self.dim] = self.mesh.coordinates()\n # keep reference\n self.refs.append(cl)\n # make vtkarray\n v = vtk.vtkPoints()\n v.SetNumberOfPoints(len(cl))\n v.SetData(VN.numpy_to_vtk(cl))\n # add points to a new grid\n self.vtkgrid = [None] * (self.dim + 1)\n # grids for edges, faces, cells\n for dim in range(1, self.dim + 1):\n self.vtkgrid[dim] = vtk.vtkUnstructuredGrid()\n # grids share points\n self.vtkgrid[dim].SetPoints(v)\n # get connectivity from topology\n nl = array(self.mesh.topology()(dim, 0)()).reshape(-1, dim + 1)\n ncells = len(nl)\n # cellsize = dim + 2\n cells = zeros((ncells, dim + 2), dtype=VN.ID_TYPE_CODE)\n cells[:, 1:] = nl\n cells[:, 0] = dim + 1\n self.refs.append(cells)\n # vtk cell array\n ca = vtk.vtkCellArray()\n ca.SetCells(ncells, VN.numpy_to_vtkIdTypeArray(cells))\n # add edges/faces as VTK cells\n if dim == 1:\n self.vtkgrid[dim].SetCells(vtk.VTK_LINE, ca)\n elif dim == 2:\n self.vtkgrid[dim].SetCells(vtk.VTK_TRIANGLE, ca)\n else:\n self.vtkgrid[dim].SetCells(vtk.VTK_TETRA, ca)\n self.vtkgrid[0] = self.vtkgrid[self.dim]", "def 
__init__(self, n_rows=3, n_columns=16, n_obs=2, agents_r=[0,2],\n agents_c=[16,16], n_agents=1, max_steps=50):\n assert(n_rows % 2 == 1)\n assert(n_columns % 2 == 0)\n # Only n_rows and n_columns have green and orange squares\n self.n_rows = n_rows\n self.n_columns = n_columns\n self.n_obs = n_obs\n # Total grid size is larger so that agents' observations are valid\n # when they are located on the boundary\n self.total_rows = self.n_rows + 2*self.n_obs\n self.total_columns = self.n_columns + 2*self.n_obs + 1\n\n # Used to determine episode termination\n self.max_collectible = self.n_rows * self.n_columns\n\n self.n_agents = n_agents\n self.max_steps = max_steps\n\n # Initial agent locations, situated in expanded grid \n self.agents_r = np.array(agents_r) + self.n_obs\n self.agents_c = np.array(agents_c) + self.n_obs", "def gen_grids(n_1, n_2):\n x_grid = 1.0*np.arange(-int(n_2/2), int(n_2/2)+1)\n y_grid = 1.0*np.arange(-int(n_1/2), int(n_1/2)+1)\n return x_grid, y_grid", "def test_build_nNodes_per_face(self):\n\n # test on grid constructed from sample datasets\n grids = [self.grid_mpas, self.grid_exodus, self.grid_ugrid]\n\n for grid in grids:\n # highest possible dimension dimension for a face\n max_dimension = grid.nMaxMesh2_face_nodes\n\n # face must be at least a triangle\n min_dimension = 3\n\n assert grid.nNodes_per_face.min() >= min_dimension\n assert grid.nNodes_per_face.max() <= max_dimension\n\n # test on grid constructed from vertices\n verts = [\n self.f0_deg, self.f1_deg, self.f2_deg, self.f3_deg, self.f4_deg,\n self.f5_deg, self.f6_deg\n ]\n grid_from_verts = ux.open_grid(verts)\n\n # number of non-fill-value nodes per face\n expected_nodes_per_face = np.array([6, 3, 4, 6, 6, 4, 4], dtype=int)\n nt.assert_equal(grid_from_verts.nNodes_per_face.values,\n expected_nodes_per_face)", "def generate_grid_from_flow_dir(rs):\n flow_dir_array = rs.read(1) # Read the first band of the Thames raster\n # First, just create the grid cells and set their outflow property to\n # contain the flow direction\n for y, xy in enumerate(flow_dir_array):\n for x, flow_dir in enumerate(xy):\n # Get the spatial coord at the centre and lower left of this pixel\n # Rastio .xy(row,col) takes row, col (y,x)\n xy_spatial = rs.xy(y,x)\n xy_spatial_ll = rs.xy(y,x,offset='ll')\n if flow_dir > 0:\n cell_ref = \"GridCell_{0}_{1}\".format(x+1,y+1)\n data_dict[cell_ref] = {\n \"x\": x+1,\n \"y\": y+1,\n \"x_coord_c\": xy_spatial[0],\n \"y_coord_c\": xy_spatial[1],\n \"x_coord_ll\": xy_spatial_ll[0],\n \"y_coord_ll\": xy_spatial_ll[1],\n \"size[d]\": list(rs.res),\n \"outflow[d]\": outflow_from_flow_dir(x+1, y+1, flow_dir),\n \"flow_dir\": int(flow_dir)\n }\n \n # Set the the inflow for each cell, based on the cell outflows. 
At the\n # end of this, each cell will have the number of outflows equal to the\n # number of river reaches that should be in that cell.\n for ref, cell in data_dict.items():\n x = cell['x']\n y = cell['y']\n outflow_ref = \"GridCell_{0}_{1}\".format(*cell['outflow[d]'])\n # Set this cell's outflow as the receiving cell's inflow.\n if outflow_ref in data_dict:\n if 'inflows' in data_dict[outflow_ref]:\n data_dict[outflow_ref]['inflows'].append([x,y])\n else:\n data_dict[outflow_ref]['inflows'] = [[x,y]]\n else:\n data_dict[ref]['domain_outflow[d]'] = cell['outflow[d]']\n \n # Now create the number of river reaches, based on the number of inflows.\n # If there are no inflows, there must just be one river reach\n for ref, cell in data_dict.items():\n x = cell['x']\n y = cell['y']\n if 'inflows' in cell:\n for i in range(1, len(cell['inflows']) + 1):\n data_dict[ref]['RiverReach_{0}_{1}_{2}'.format(x,y,i)] = {}\n data_dict[ref]['n_river_reaches'] = i\n else:\n data_dict[ref]['RiverReach_{0}_{1}_1'.format(x,y)] = {}\n data_dict[ref]['n_river_reaches'] = 1\n\n # Now that we've got the correct number of river reaches, we need to\n # set their inflows. We couldn't do this until they were all created\n # as we wouldn't know how many inflows the grid cell inflow reach would\n # have from the upstream cell. Remember each cell can only have one\n # outflow, so any river reach inflow will come from *all* reaches in\n # the upstream grid cell\n for ref, cell in data_dict.items():\n x = cell['x']\n y = cell['y']\n data_dict[ref]['SoilProfile_{0}_{1}_1'.format(x, y)] = {} # Empty soil profile group\n # Loop through the number of reaches, which will be the same number\n # as the number of cell inflows\n for i in range(1,cell['n_river_reaches'] + 1):\n reach_ref = \"RiverReach_{0}_{1}_{2}\".format(x,y,i)\n # Only carry on if this cell has inflows\n if 'inflows' in cell:\n x_in, y_in = cell['inflows'][i-1]\n data_dict[ref][reach_ref]['inflows[in][river_reach_ref]'] = []\n # *All* of the upstream cell's reaches must drain into this river\n for j in range(1,data_dict[\"GridCell_{0}_{1}\".format(x_in, y_in)]['n_river_reaches'] + 1):\n data_dict[ref][reach_ref]['inflows[in][river_reach_ref]'].append([x_in, y_in, j])\n data_dict[ref][reach_ref]['is_headwater'] = 0\n else:\n data_dict[ref][reach_ref]['is_headwater'] = 1\n # If there's a domain outflow to the grid cell, this setup means that every reach\n # will also be a domain outflow, so we must set them as so, using the grid cell's\n # domain outflow property\n if 'domain_outflow[d]' in cell:\n data_dict[ref][reach_ref]['domain_outflow[d]'] = cell['domain_outflow[d]']\n\n grid_dimensions = flow_dir_array.shape # Returns (y,x), not (x,y)!\n data_dict[\"grid_dimensions[d]\"] = [grid_dimensions[1], grid_dimensions[0]]\n\n headwaters = []\n for cell_ref, cell in data_dict.items():\n if cell_ref not in ['dimensions', 'grid_dimensions[d]']:\n x = int(cell_ref.split('_')[1])\n y = int(cell_ref.split('_')[2])\n for i in range(1, cell['n_river_reaches'] + 1):\n reach_ref = get_reach_ref(cell_ref, cell, i)\n reach = data_dict[cell_ref][reach_ref]\n # Is this a headwater?\n if 'is_headwater' in reach and reach['is_headwater'] == 1:\n headwaters.append(reach_ref)\n data_dict[cell_ref][reach_ref]['stream_order'] = 1\n # Does this have inflows? 
Set their outflow to this reach\n if 'inflows[in][river_reach_ref]' in reach:\n for inflow in reach['inflows[in][river_reach_ref]']:\n inflow_cell_ref = 'GridCell_{0}_{1}'.format(inflow[0], inflow[1])\n inflow_reach_ref = get_reach_ref(inflow_cell_ref, data_dict[inflow_cell_ref], inflow[2])\n data_dict[inflow_cell_ref][inflow_reach_ref]['outflow[river_reach_ref]'] = [x, y, i]\n \n routed_reaches.append(headwaters)\n route_reaches(seeds=headwaters, data_dict=data_dict)\n\n # Convert routed reach refs to an array \n routed_reaches_array = [[[*[int(i) for i in reach_ref.split('_')[1:4]]] for reach_ref in branch] for branch in routed_reaches]\n\n # Set the dimensions\n max_seeds = 0\n for branch in routed_reaches_array:\n max_seeds = max(len(branch), max_seeds)\n data_dict['dimensions'] = {\n 'branches': len(routed_reaches_array),\n 'seeds': max_seeds\n }\n\n # Fill the \"ragged\" parts of the routed reaches array with 0s to save having to\n # faff about with ragged arrays in NetCDF and then in Fortran\n for i, branch in enumerate(routed_reaches_array):\n if len(branch) < max_seeds:\n for j in range(len(branch),max_seeds):\n routed_reaches_array[i].append([0, 0, 0])\n data_dict['routed_reaches[branches][seeds][river_reach_ref]'] = routed_reaches_array\n\n return data_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function deriving locations and devices array from the csv files considering timedelta(current_client_time start_time) 1 day is taken as a time unit > 100days corresponds to 100%
def get_locations_devices(self, days_delta): mod = (days_delta % 100)/float(100) locations = [None] * 3 devices = [None] * 3 #locations for interviewees(1,2,3) employees2locations = company().employees2locations for i in range(0, 3): if mod <= employees2locations[0, :][i]: # percentage derived from locations.csv locations[i] = 'home' else: if mod <= (employees2locations[0, :][i])+(employees2locations[1, :][i]): # (percentage from locations.csv) + (previous one) locations[i] = 'public' else: if mod > (employees2locations[0, :][i])+(employees2locations[1, :][i]): locations[i] = 'office' #devices for interviewees(1,2,3) employees2devices = company().employees2devices for i in range(0, 3): if mod <= employees2devices[0, :][i]: # percentage derived from devices.csv devices[i] = 'desktop' else: if mod <= (employees2devices[0, :][i])+(employees2devices[1, :][i]): # (percentage from devices.csv) + (previous one) devices[i] = 'laptop' else: if mod > (employees2devices[0, :][i])+(employees2devices[1, :][i]): devices[i] = 'phone' return locations, devices
[ "def get_data_at_location(path, t_start, t_end, sensor_name):\r\n\r\n filename = path + '/' + sensor_name + '.csv'\r\n if os.path.isfile(filename):\r\n with open(filename) as file_in:\r\n data = csv.reader(file_in, delimiter='\\t')\r\n data_value = []\r\n for data_row in data:\r\n if data_row:\r\n for i in range(len(t_start)):\r\n if (float(data_row[0])>=t_start[i])and(float(data_row[0])<=t_end[i]):\r\n #print 'data added'\r\n data_value.append(data_row)\r\n file_in.close()\r\n else:\r\n print(('warning: sensor '+sensor_name+' not found.'))\r\n return []\r\n\r\n return data_value", "def divide_csv_daily(file_path):\n daily_files = []\n directory = os.path.dirname(file_path)\n\n try:\n data_frame = pd.read_csv(file_path)\n except Exception as error:\n LOG.error(f\"File {file_path} could not be parsed. Reason: {str(error)}\")\n raise error\n\n unique_times = data_frame.usage_start_time.unique()\n days = list({cur_dt[:10] for cur_dt in unique_times})\n daily_data_frames = [\n {\"data_frame\": data_frame[data_frame.usage_start_time.str.contains(cur_day)], \"date\": cur_day}\n for cur_day in days\n ]\n\n for daily_data in daily_data_frames:\n day = daily_data.get(\"date\")\n df = daily_data.get(\"data_frame\")\n day_file = f\"{day}.csv\"\n day_filepath = f\"{directory}/{day_file}\"\n df.to_csv(day_filepath, index=False, header=True)\n daily_files.append({\"filename\": day_file, \"filepath\": day_filepath})\n return daily_files", "def readStationTimetbl(filename):\r\n with open(filename) as csvfile:\r\n reader = csv.reader(csvfile, delimiter=',');\r\n stationTimetbl = [];\r\n for row in reader:\r\n platformTime = [];\r\n for time in row:\r\n platformTime.append(utilities.convertTimeFormatToSecs(time));\r\n stationTimetbl.append(platformTime);\r\n \r\n return stationTimetbl;", "def get_data_by_time(path, columns, start_date, start_time=\"00:00\", end_date=None, end_time=\"23:59\"):\n\n # Locate and read data file(s)\n if path[-1] != '/':\n path += '/'\n paths = [path + \"datalog \" + start_date + '.xls']\n data = [remove_notes(pd.read_csv(paths[0], delimiter='\\t'))]\n\n if end_date is not None:\n paths.append(path + \"datalog \" + end_date + \".xls\")\n data.append(remove_notes(pd.read_csv(paths[1], delimiter='\\t')))\n\n # Calculate start index\n time_column = pd.to_numeric(data[0].iloc[:, 0])\n interval = time_column[1]-time_column[0]\n start_idx = int(round((day_fraction(start_time) - time_column[0])/interval + .5)) #round up\n\n # Calculate end index\n time_column = pd.to_numeric(data[-1].iloc[:, 0])\n end_idx = int(round((day_fraction(end_time) - time_column[0])/interval + .5)) + 1 #round up\n\n # Get columns of interest\n if len(paths) == 1:\n if isinstance(columns, int):\n result = list(pd.to_numeric(data[0].iloc[start_idx:end_idx, columns]))\n else:\n result = []\n for c in columns:\n result.append(list(pd.to_numeric(data[0].iloc[start_idx:end_idx, c])))\n else:\n data[1].iloc[0, 0] = 0\n if isinstance(columns, int):\n result = list(pd.to_numeric(data[0].iloc[start_idx:, columns])) + \\\n list(pd.to_numeric(data[1].iloc[:end_idx, columns]) + (1 if columns == 0 else 0))\n else:\n result = []\n for c in columns:\n result.append(list(pd.to_numeric(data[0].iloc[start_idx:, c])) +\n list(pd.to_numeric(data[1].iloc[:end_idx, c])+(1 if c == 0 else 0)))\n\n return result", "def readCsv(path, avg_data, min_data, search_factor_data, detailed, ramais, km_de_conduta, pesquisa_ramais_data, pesquisa_km_data):\n \n\n \n \n data = pandas.read_csv(path, delimiter=';', decimal=',',skiprows=2)\n\n 
x_avg_data=[]\n y_avg_data=[]\n \n x_min_data=[]\n y_min_data=[]\n \n x_search_factor_data=[]\n y_search_factor_data=[]\n \n if ramais!=0 and km_de_conduta!=0:\n x_pesquisa_ramais_data=[] \n y_pesquisa_ramais_data=[]\n \n x_pesquisa_km_data=[]\n y_pesquisa_km_data=[]\n \n lastDate=0\n\n for index, row in data.iterrows():\n \n date = row['Data']\n read_value = row['Leitura']\n flow = row['Caudal']\n \n currentDate=datetime.strptime(date, '%d/%m/%Y %H:%M').date()\n #new date read or end of file\n if currentDate!=lastDate or index==data.iloc[-1].name :\n #if this is not the first row of the document and date is changed execute the calculations\n if lastDate!=0:\n \n avg_flow=round(tot_flow_current_day/days,2)\n y_avg_data.insert(0,avg_flow)\n x_avg_data.insert(0,lastDate)\n \n \n x_min_data.insert(0,min_time)\n y_min_data.insert(0,min_flow)\n \n current_day_search_factor=round(min_flow/avg_flow,2)\n x_search_factor_data.insert(0,lastDate)\n y_search_factor_data.insert(0,current_day_search_factor)\n \n if detailed:\n x_pesquisa_ramais_data.insert(0,lastDate) \n y_pesquisa_ramais_data.insert(0,round(min_flow/ramais,3))\n \n x_pesquisa_km_data.insert(0,lastDate) \n y_pesquisa_km_data.insert(0,round(min_flow/km_de_conduta,3))\n \n \n \n lastDate=currentDate\n tot_flow_current_day=flow\n \n min_flow=flow\n min_time=lastDate\n \n days=1\n \n #processing data of the same date \n else: \n \n tot_flow_current_day+=float(flow)\n days+=1\n \n if flow<min_flow:\n min_flow=flow\n min_time=lastDate\n \n avg_data=PlotData(x_avg_data,y_avg_data)\n min_data=PlotData(x_min_data,y_min_data)\n search_factor_data=PlotData(x_search_factor_data,y_search_factor_data)\n \n if detailed:\n pesquisa_ramais_data=PlotData(x_pesquisa_ramais_data,y_pesquisa_ramais_data)\n pesquisa_km_data=PlotData(x_pesquisa_km_data,y_pesquisa_km_data)\n \n return avg_data, min_data, search_factor_data, pesquisa_ramais_data, pesquisa_km_data", "def read_epcc_csv_logs(filename_in):\n\n csv_jobs = []\n\n with open(filename_in, 'rb') as csvfile:\n\n csv_dict = csv.DictReader(csvfile, delimiter=',', quotechar='\"')\n\n for line_dict in csv_dict:\n\n i_job = IngestedJob()\n\n # if isinstance(line_dict['ctime'], str):\n # # i_job.time_queued = int(line_dict['ctime'])\n # i_job.time_queued = int(line_dict['start']) + 999 # will be removed later..\n # else:\n # print \"line_dict['ctime']: \", line_dict['ctime']\n # i_job.time_queued = int(line_dict['start']) + 999 # will be removed later..\n\n try:\n i_job.time_queued = int(line_dict['ctime'])\n except:\n print((\"I didn't recognize ctime {0} as a number\".format(line_dict['ctime'])))\n i_job.time_queued = -1\n\n try:\n i_job.time_end = int(line_dict['end'])\n except:\n print((\"I didn't recognize end {0} as a number\".format(line_dict['end'])))\n i_job.time_end = -1\n\n try:\n i_job.time_start = int(line_dict['start'])\n except:\n print((\"I didn't recognize start {0} as a number\".format(line_dict['start'])))\n i_job.time_start = -1\n\n try:\n i_job.ncpus = int(line_dict['ncpus'])\n except:\n print((\"I didn't recognize start {0} as a number\".format(line_dict['ncpus'])))\n i_job.ncpus = -1\n\n try:\n i_job.nnodes = int(line_dict['node_count'])\n except:\n print((\"I didn't recognize start {0} as a number\".format(line_dict['node_count'])))\n i_job.nnodes = -1\n\n # i_job.group = line_dict['group'].strip()\n i_job.group = ''\n\n if line_dict['jobname']:\n i_job.jobname = line_dict['jobname'].strip()\n else:\n i_job.jobname = ''\n\n if line_dict['jobname']:\n i_job.user = 
line_dict['UserID'].strip()\n else:\n i_job.user = ''\n\n if line_dict['jobname']:\n i_job.queue_type = line_dict['queue'].strip()\n else:\n i_job.queue_type = ''\n\n # info not available\n i_job.time_created = -1\n i_job.time_eligible = -1\n i_job.memory_kb = -1\n i_job.cmd_str = None # command line string not available\n\n csv_jobs.append(i_job)\n\n # remove invalid entries\n csv_jobs[:] = [job for job in csv_jobs if job.time_start != -1]\n csv_jobs[:] = [job for job in csv_jobs if job.time_end != -1]\n csv_jobs[:] = [job for job in csv_jobs if job.time_end >= job.time_start]\n csv_jobs[:] = [job for job in csv_jobs if job.time_queued != -1]\n csv_jobs[:] = [job for job in csv_jobs if job.time_start >= job.time_queued]\n csv_jobs[:] = [job for job in csv_jobs if job.ncpus > 0]\n csv_jobs[:] = [job for job in csv_jobs if job.nnodes > 0]\n\n # store the original idx of each job..\n for (ii, i_job) in enumerate(csv_jobs):\n i_job.idx_in_log = ii\n\n csv_jobs.sort(key=lambda x: x.time_start, reverse=False)\n\n # times relative to start of log\n min_start_time = min([i_job.time_start for i_job in csv_jobs])\n for i_job in csv_jobs:\n # print type(i_job.time_queued), type(i_job.time_end), type(i_job.time_start)\n i_job.runtime = float(i_job.time_end) - float(i_job.time_start)\n i_job.time_start_0 = i_job.time_start - min_start_time\n i_job.time_in_queue = i_job.time_start - i_job.time_queued\n\n return csv_jobs", "def _read_dwd(date, timezone, longitude, latitude, path):\n \n # initialize variables \n dwdpath = os.path.join(os.path.join(path, \"dwd\"))\n fields = [\"aswdifd_s\", \"aswdir_s\", \"t_2m\", \"t_g\"]\n \n lastForecast = None\n for f in range(len(fields)):\n # get date of latest forecast\n dirList = os.listdir(os.path.join(dwdpath, fields[f]))\n dirList.sort(reverse = True)\n if dirList[0].rsplit(\"_\", 2)[0] == 120:\n lastForecast = dirList[0].rsplit(\"_\", 2)[1]\n \n if lastForecast != None:\n # unpack compressed, latest forecast\n os.system(\"bunzip2 --keep `find \" + dwdpath + \" -name '*\" + lastForecast + \"*.bz2'`\")\n \n dates = []\n data = []\n for f in range(len(fields)):\n # list all extracted grib files\n dirList = glob.glob(os.path.join(dwdpath, fields[f], \"*\" + lastForecast + \"*.grib2\"))\n dirList.sort()\n \n lastValue = 0\n data.append([])\n \n if len(dirList) >= 48:\n for i in range(24):\n grb = pygrib.open(dirList[i])\n grb.seek(0)\n \n lat, lon = grb.latlons()\n i, j = _get_location_nearest(lat, lon, latitude, longitude)\n \n lastTimestamp = False\n firstTimestamp = False\n for g in grb:\n timestamp = datetime.datetime.strptime(str(g['validityDate']) + \" \" + '%0.0f'%(g['validityTime']/100.0), \"%Y%m%d %H\")\n \n if lastTimestamp:\n if f == 0:\n datestr = datetime.datetime.strftime(lastTimestamp, \"%Y-%m-%d %H\")\n dates.append(datestr)\n \n if fields[f] == \"aswdifd_s\" or fields[f] == \"aswdir_s\":\n diff = (timestamp - lastTimestamp).total_seconds() / 3600.0\n value = (1 / diff) * ((timestamp - firstTimestamp).total_seconds() / 3600 * g['values'][i, j] - (lastTimestamp - firstTimestamp).total_seconds() / 3600 * lastValue)\n else:\n value = g['values'][i, j]\n \n data[f].append(value)\n \n else:\n firstTimestamp = timestamp\n \n lastTimestamp = timestamp\n lastValue = g['values'][i, j]\n \n grb.close()\n \n if len(dates) > 0:\n csvpath = os.path.join(os.path.join(path, \"csv\"))\n with open(os.path.join(csvpath, \"DWD_\" + lastForecast + \".csv\"), 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter = \",\")\n line = [\"time\"]\n line.extend(fields)\n 
writer.writerow(line)\n for i in range(len(dates)):\n line = [dates[i] + \":00:00\"]\n for j in range(len(fields)):\n line.append(data[j][i])\n writer.writerow(line)\n \n # clean up\n os.system(\"find \" + dwdpath + \" -name '*\" + lastForecast + \"*.grib2' -exec rm -f {} \\;\")\n \n return None;", "def init_device_list(self):\n self.devices_list = []\n if self.csv_file_name and os.path.exists(self.csv_file_name):\n with open(self.csv_file_name, 'rb') as csv_file:\n csv_reader = reader(csv_file, delimiter=',')\n for row in csv_reader:\n if self.valid_ip(row[1]):\n self.devices_list.append(row)\n return self.devices_list\n else:\n logging.getLogger(\"HWR\").error(\\\n \"BeamlineTest: Device file \" + \\\n \"%s not found\" % self.csv_file_name)", "def load_dataset(start_date, end_date, config):\n datetime_index = pd.read_csv(config.index_dir)\n datetime_ref = pd.read_csv(config.index_ref_dir, index_col=0, dtype={\"symbol\": \"str\"})\n datetime_index[\"date\"] = pd.to_datetime(datetime_index['datetime']).dt.date\n start_index = np.where(datetime_index[\"date\"].values >=\n pd.to_datetime(str(start_date), format=\"%Y-%m-%d\").date())[0][0]\n end_index = np.where(datetime_index[\"date\"].values <\n pd.to_datetime(str(end_date), format=\"%Y-%m-%d\").date())[0][-1]\n datetime_index = datetime_index.iloc[start_index:end_index + 1] # select dates needed here\n trade_date = datetime_index[\"date\"].values\n valid_datetime = datetime_index[\"datetime\"].values\n symbol = datetime_ref.loc[valid_datetime, \"symbol\"].values\n\n factor_cols = pd.read_csv(config.factor_dir)[\"factor_name\"]\n factor_size = len(factor_cols)\n offset = start_index\n count = end_index - start_index + 1\n with open(config.data_dir, \"rb\") as afile:\n afile.seek(offset * factor_size * 4)\n arr = np.fromfile(afile, count=count * factor_size, dtype=np.float32)\n arr = arr.reshape(-1, factor_size)\n return arr, trade_date, symbol", "def _rap_file_dates(files):\n return pd.DatetimeIndex( # remove trailing 0's\n [datetime.strptime(c_file.split(\"/\")[-1][:-6], \"%y%j%H\")\n for c_file in files], tz='UTC')", "def load_monitor(): \n\n # get the start time\n start_time = get_monitor_start_time()\n\n # initialize an array to hold the data\n monitor_data = np.empty((0,4), float)\n\n # loop through the files and load the data\n for filename in os.listdir('../data_ucn/monitor_detector'):\n \n # get the time stamp from the txt file and the counts from the tof file\n # but we only check for one, so that we don't do each twice.\n if(filename[0] == 'T' and 'tof' in filename):\n \n # print(filename[0:12])\n\n # grab from the text file associated with the run\n f = open('../data_ucn/monitor_detector/' \n + filename[0:12] \n + '.txt') \n\n lines = f.readlines()\n f.close()\n\n # grab the epoch time for run start\n date_time = filename[1:3].zfill(2) + '.12.2017 '\\\n + lines[26][15:23]\n \n pattern = '%d.%m.%Y %H:%M:%S'\n run_time = int(time.mktime(\n time.strptime(date_time, pattern)))\n\n # reset the run_start_time with reference to the\n # t = 0 time\n # !!! 
temporarily use the raw UNIX epoch time stamp\n# run_time = run_time - start_time\n\n # load the monitor count data\n arr = np.loadtxt('../data_ucn/monitor_detector/' + filename,\n usecols = (1))\n\n # sum the counts\n counts = np.sum(arr)\n\n # saving the [day].[run number] can be useful for debugging\n day_run_no = int(filename[1:3]) + (0.001\n * int(filename[9:12]))\n\n # the current data is appended to the existing data array\n monitor_data = np.append(monitor_data, [[run_time, \n counts, \n np.sqrt(counts),\n day_run_no]], axis = 0)\n \n return monitor_data[monitor_data[:,0].argsort()]", "def read_trt_info_all2(info_path):\n file_list = glob.glob(info_path + '*.txt')\n if not file_list:\n warn('No info files in ' + info_path)\n return None\n\n trt_time = np.ma.array([], dtype=datetime.datetime)\n cell_id = np.ma.array([], dtype=int)\n rank = np.ma.array([])\n scan_time = np.ma.array([], dtype=datetime.datetime)\n azi = np.ma.array([])\n rng = np.ma.array([])\n lat = np.ma.array([])\n lon = np.ma.array([])\n ell_l = np.ma.array([])\n ell_s = np.ma.array([])\n ell_or = np.ma.array([])\n vel_x = np.ma.array([])\n vel_y = np.ma.array([])\n det = np.ma.array([])\n\n for file in file_list:\n (trt_time_aux, id_aux, rank_aux, scan_time_aux, azi_aux, rng_aux,\n lat_aux, lon_aux, ell_l_aux, ell_s_aux, ell_or_aux, vel_x_aux,\n vel_y_aux, det_aux) = read_trt_info(file)\n\n if trt_time_aux is None:\n continue\n\n trt_time = np.ma.append(trt_time, trt_time_aux)\n cell_id = np.ma.append(cell_id, id_aux)\n rank = np.ma.append(rank, rank_aux)\n scan_time = np.ma.append(scan_time, scan_time_aux)\n azi = np.ma.append(azi, azi_aux)\n rng = np.ma.append(rng, rng_aux)\n lat = np.ma.append(lat, lat_aux)\n lon = np.ma.append(lon, lon_aux)\n ell_l = np.ma.append(ell_l, ell_l_aux)\n ell_s = np.ma.append(ell_s, ell_s_aux)\n ell_or = np.ma.append(ell_or, ell_or_aux)\n vel_x = np.ma.append(vel_x, vel_x_aux)\n vel_y = np.ma.append(vel_y, vel_y_aux)\n det = np.ma.append(det, det_aux)\n\n return (\n trt_time, cell_id, rank, scan_time, azi, rng, lat, lon, ell_l, ell_s,\n ell_or, vel_x, vel_y, det)", "def read_trt_info_all(info_path):\n file_list = glob.glob(info_path + '*.txt')\n if not file_list:\n warn('No info files in ' + info_path)\n return None\n\n trt_time = np.array([], dtype=datetime.datetime)\n cell_id = np.array([], dtype=int)\n rank = np.array([])\n nscans = np.array([], dtype=int)\n azi = np.array([])\n rng = np.array([])\n lat = np.array([])\n lon = np.array([])\n ell_l = np.array([])\n ell_s = np.array([])\n ell_or = np.array([])\n vel_x = np.array([])\n vel_y = np.array([])\n det = np.array([])\n\n for file in file_list:\n (trt_time_aux, id_aux, rank_aux, nscans_aux, azi_aux, rng_aux,\n lat_aux, lon_aux, ell_l_aux, ell_s_aux, ell_or_aux, vel_x_aux,\n vel_y_aux, det_aux) = read_trt_info(file)\n\n if trt_time_aux is None:\n continue\n\n trt_time = np.append(trt_time, trt_time_aux)\n cell_id = np.append(cell_id, id_aux)\n rank = np.append(rank, rank_aux)\n nscans = np.append(nscans, nscans_aux)\n azi = np.append(azi, azi_aux)\n rng = np.append(rng, rng_aux)\n lat = np.append(lat, lat_aux)\n lon = np.append(lon, lon_aux)\n ell_l = np.append(ell_l, ell_l_aux)\n ell_s = np.append(ell_s, ell_s_aux)\n ell_or = np.append(ell_or, ell_or_aux)\n vel_x = np.append(vel_x, vel_x_aux)\n vel_y = np.append(vel_y, vel_y_aux)\n det = np.append(det, det_aux)\n\n return (\n trt_time, cell_id, rank, nscans, azi, rng, lat, lon, ell_l, ell_s,\n ell_or, vel_x, vel_y, det)", "def load_station_data(self, filename, 
dataset='ECA-station', print_prog=True, offset_in_file=0):\n\n if dataset == 'Klem_day':\n raw_data = np.loadtxt(self.data_folder + filename) # first column is continous year and second is actual data\n self.data = np.array(raw_data[:, 1])\n time = []\n\n # use time iterator to go through the dates\n y = int(np.modf(raw_data[0, 0])[1])\n if np.modf(raw_data[0, 0])[0] == 0:\n start_date = date(y, 1, 1)\n delta = timedelta(days = 1)\n d = start_date\n while len(time) < raw_data.shape[0]:\n time.append(d.toordinal())\n d += delta\n self.time = np.array(time)\n self.location = 'Praha-Klementinum, Czech Republic'\n print(\"Station data from %s saved to structure. Shape of the data is %s\" % (self.location, str(self.data.shape)))\n print(\"Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1\")\n\n if dataset == 'ECA-station':\n with open(self.data_folder + filename, 'rb') as f:\n time = []\n data = []\n missing = []\n i = 0 # line-counter\n reader = csv.reader(f)\n for row in reader:\n i += 1\n if i == 16 + offset_in_file: # line with location\n c_list = filter(None, row[1].split(\" \"))\n del c_list[-2:]\n country = ' '.join(c_list).lower()\n station = ' '.join(row[0].split(\" \")[7:]).lower()\n self.location = station.title() + ', ' + country.title()\n if i > 20 + offset_in_file: # actual data - len(row) = 5 as STAID, SOUID, DATE, TG, Q_TG\n staid = int(row[0])\n value = float(row[3])\n year = int(row[2][:4])\n month = int(row[2][4:6])\n day = int(row[2][6:])\n time.append(date(year, month, day).toordinal())\n if value == -9999.:\n missing.append(date(year, month, day).toordinal())\n data.append(np.nan)\n else:\n data.append(value/10.)\n self.station_id = staid\n self.data = np.array(data)\n self.time = np.array(time)\n self.missing = np.array(missing)\n if print_prog:\n print(\"Station data from %s saved to structure. Shape of the data is %s\" % (self.location, str(self.data.shape)))\n print(\"Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1\")\n if self.missing.shape[0] != 0 and self.verbose:\n print(\"** WARNING: There were some missing values! 
To be precise, %d missing values were found!\" % (self.missing.shape[0]))", "def findStartConditions(ncfile, files, odir):\n\n print '{} drifters will be analysed...'.format(len(files))\n\n data_count = 0\n names= []\n lon0 = []\n lat0 = []\n x0 = []\n y0 = []\n mtime0 = []\n firststep = []\n laststep = []\n\n for i, fname in enumerate(files, start=1):\n print 'working with ' + fname + '...'\n\n drift = Drifter(odir+fname, debug=False)\n\n print '\\tcompiling data...'\n lon0.append(drift.Variables.lon[0])\n lat0.append(drift.Variables.lat[0])\n mtime0.append(drift.Variables.matlabTime[0])\n firststep.append(int(np.argmin(np.abs(ncfile.Variables.matlabTime - \\\n drift.Variables.matlabTime[0]))))\n laststep.append(int(np.argmin(np.abs(ncfile.Variables.matlabTime - \\\n drift.Variables.matlabTime[-1]))))\n names.append(fname)\n\n idx = closest_points(drift.Variables.lon[0], drift.Variables.lat[0], \\\n ncfile.Grid.lon, ncfile.Grid.lat)\n\n x0.append(ncfile.Grid.x[idx])\n y0.append(ncfile.Grid.y[idx])\n\n return names, lon0, lat0, x0, y0, mtime0, firststep, laststep", "def batch_read_gps_logs(directory):\n pass", "def read_file(self):\n\n file_name = os.path.join(self._base_path, \"my-TransitStopTimes.csv\")\n line_count = 0\n f = None\n\n print \"StopTimes: Mapping stops to routes...\"\n print \"StopTimes: Reading file %s...\" % file_name\n\n try:\n start_time = time.time()\n f = open(file_name, 'r')\n\n for line in f:\n line_count += 1\n if line_count == 1: continue\n\n line = line.strip()\n parts = line.split(\",\")\n\n try:\n stop_id = self.make_stop_id(parts[0].strip())\n except:\n print \"Failed to get stop id from: %s\" % repr(parts[0].strip())\n stop_id = None\n\n if stop_id is None:\n raise ValueError(\"error!!!\")\n\n # if stop_id == 3432:\n # raise ValueError(\"Got stop id 3432\")\n\n if stop_id is None:\n print \"no stop ID\"\n continue\n\n trip_id = int(parts[1].strip())\n\n route = self._dataman.get_route_from_trip_id(trip_id)\n\n if route is None:\n # I think its perfectly valid to fail to get a route ID when there are\n # two sets of data in the OPEN dataset\n continue\n\n # raise ValueError(\"Failed to get route for trip ID: %s\" % repr(trip_id))\n\n depart_time_str = parts[3].strip()\n depart_time = timestr_to_int(depart_time_str)\n\n # print stop_id, trip_id, depart_time\n # print \"LINE\", line, trip_id, stop_id\n\n stop = self._dataman.get_stop(stop_id)\n\n if stop is None:\n raise ValueError(\"Failed to find stop for stop_id: %d\" % stop_id)\n\n # Cross link the stop / routes\n stop.add_route_id(route.get_id())\n route.add_stop_id(stop_id)\n\n departure_data = self._departure_dict.get(stop_id, {})\n\n service_type = self._dataman.get_trip_service_type(trip_id)\n headsign = self._dataman.get_trip_headsign(trip_id)\n direction = self._dataman.get_trip_direction(trip_id)\n\n # Old key... not quite correct. Must consider stop_id BUT NOT direction\n # key = \"%d-%d-%d-%d\" % (depart_time, service_type, route.get_id(), direction)\n\n # Trying to get rid of duplicate departures\n key = \"%d-%d-%d-%d\" % (stop_id, depart_time, service_type, route.get_id())\n\n if departure_data.has_key(key):\n\n # This is happening when only the direction differs. And this is happening\n # for example at July Stop 3458 route 10236, which is the end of the line.\n # Departures before this stop have direction 0, whereas departures after this\n # stop have direction 1. But this stop has direction 0 AND 1\n\n # Sooo, let direction 0 override direction 1. 
This means we should overwrite\n # existing data if direction is 0, or punt if direction is 1\n\n if direction == 1:\n self._count_duplicate_keys_total += 1\n continue\n\n if service_type is None:\n print \"failed to get service_id for trip_id\", trip_id\n\n departure_data[key] = {\n KEY.TRIP_ID : trip_id,\n KEY.DEPART_TIME : depart_time,\n KEY.SERVICE_TYPE : service_type,\n KEY.ROUTE_ID : route.get_id(),\n KEY.HEADSIGN : headsign,\n KEY.DIRECTION : direction\n }\n self._departure_dict[stop_id] = departure_data\n\n read_time = time.time() - start_time\n print \"file: %s departures: %d read time: %.2f sec\" % (file_name, line_count - 1, read_time)\n print \"StopTimes: duplicate departure key count\", self._count_duplicate_keys_total\n\n # raise ValueError(\"temp stop\")\n\n finally:\n if f:\n f.close()", "def _create_array(self,path):\n #load data\n x = np.genfromtxt(path,delimiter = ',')\n #cut unnecessary parts (the stimuli was present 5-20 seconds)\n x = x[5*self.freq:20*self.freq]\n\n re_sult = re.search('(?P<freq>\\d+)Hz',path)\n f = re_sult.group('freq')\n target = self._target[f]\n t = [target for n in range(x.shape[0])]\n return np.column_stack((x,t))", "def read_data_by_days(sc, path, start, end):\n day_list = get_day_range(start, end)\n print \"get data from {0} to {1}\".format(day_list[0], day_list[-1])\n day_paths = map(lambda x: \"{0}/day={1}\".format(path, x), day_list)\n day_paths_str = \",\".join(day_paths)\n rdd = sc.textFile(day_paths_str)\n return rdd" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates total price for list of menu items
def calc_total_price(items): total_price = 0 for item in items: total_price += item.get('price') * item.get('quantity') return total_price
[ "def lineitem_price(self):\n price = Decimal(\"0.00\")\n for li in self.lineitems.all():\n price += li.total\n return price", "def subtotal_calc(selected_products):\n subtotal = 0\n for product in selected_products:\n price = product[\"price\"]\n subtotal = price + subtotal\n return subtotal", "def total(self):\n total = 0.0 \n for vinylid in self.cart:\n total += self.get_price(vinylid) \n return total", "def calculateAmount(self) -> float:\n\n amount = 0\n for item in self.items:\n amount += item.price\n return amount", "def total(anItem):\r\n\r\n if anItem.price <= 0:\r\n raise ValueError(\"total does not compute prices at or below 0 cent\")\r\n\r\n if anItem.necessary:\r\n tax = anItem.price * 0.01\r\n else:\r\n tax = anItem.price * 0.09\r\n return anItem.price + tax", "def calculate_item_total(order_items):\n item_total = 0\n\n for order in order_items:\n item_total += order.get('price') * order.get('quantity')\n\n return item_total", "def calculate_stock_price(items):\n total = 0\n for key in items:\n print key\n print \"price: %s\" % prices[key]\n print \"stock: %s\" % stock[key]\n total += prices[key] * stock[key]\n return total", "def _get_price(self):\n return sum((cart_line.price for cart_line in self.values()))", "def add_rental_fee(time,characters,menu):\n for item in menu:\n if item[\"Character\"] == characters:\n price = item[\"Price\"] * time\n return price", "def calculate_total_price(prices, discount):\n \n sum_prices = 0\n\n for price in prices:\n dis = discount/100\n pricedis = price - price * dis\n print(pricedis)\n sum_prices = sum_prices + pricedis\n print(sum)\n return math.floor(sum_prices)", "def get_total_price(receipt):\n return sum(price for name, price in receipt)", "def get_total_price(self):\n return self.pizza.get_total_price()", "def subtotal(self):\r\n return self.price * self.quantity", "def add_menue_item(order, menue):\n price = \"%.2f\" % round(float(menue['price']),2)\n menue['order'] = menue['order'] + 1\n print(menue['order'], 'order(s) of', order, 'at $' + str(price) , ' have been added to your meal.')\n global TOTAL\n TOTAL = float(price) + TOTAL\n print('Current Total W/O tax: $' + str(\"%.2f\" % round(float(TOTAL),2)))", "def total(basket: list) -> int:\n price = sum(price_group(book_set)\n for book_set in disc_group(basket))\n # Adjust price for better suited (4+4 iso 5+3) group.\n if len(basket) % 8 == 0:\n price -= 5 * len(basket)\n return price", "def insurance_price(self, items: Iterable[Item]) -> int:\n total_price: float = sum(\n self.__templates_repository.get_template(item).props.CreditsPrice\n for item in items\n )\n total_price *= self.__insurance_price_multiplier\n total_price -= total_price * min(self.standing.current_standing, 0.5)\n return int(total_price)", "def total_price(food_1, food_2):\n \n total = food_prices[food_1] + food_prices[food_2]\n\n return total", "def get_total(self, num_lemonades):\n return self.price * num_lemonades", "def _getPrice( self, node ):\n\n\t\tprice = -1\n\t\n\t\tif etree.tostring(node).find(\"<ListPrice>\") > -1: # UGLY HACK\n\t\t\tpriceString = node.ItemAttributes.ListPrice.Amount\n\t\t\tprice = float( priceString ) / 100.0\n\t\n\t\t\n\t\treturn price" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all coordinates in a 2D region. If only one pair is provided, the loop will yield x1z1 values. If two pairs are provided, the loop will yield all results between them inclusively
def loop2d(x1, y1, x2=None, y2=None): if x2 is None or y2 is None: x1, y1, x2, y2 = 0, 0, x1 - 1, y1 - 1 for x in range(x1, x2 + 1): for y in range(y1, y2 + 1): yield x, y
[ "def pairs(\n x_coordinates: Iterable[float], y_coordinates: Iterable[float]\n) -> tuple[tuple[float, float], ...]:\n pairs = tuple(zip(x_coordinates, y_coordinates))\n return pairs", "def rank_xy(pairs: Sequence[Pair]) -> Iterator[Ranked_XY]:\n return (\n Ranked_XY(r_x=r_x, r_y=rank_y_raw[0], raw=rank_y_raw[1])\n for r_x, rank_y_raw in rank(rank_y(pairs), lambda r: r.raw.x)\n )", "def points2D(self) -> tuple[Point2D, ...]:\n return tuple(map(Point2D, self._xs, self._ys))", "def double_range(limit1, limit2): #y - x\n for i1 in range(limit1):\n for i2 in range(limit2):\n yield i1, i2", "def pairs(tensor1, tensor2, name=\"pairs\"):\n with ops.name_scope(name):\n tensor1 = ops.convert_to_tensor(tensor1)\n tensor2 = ops.convert_to_tensor(tensor2)\n\n x, y = array_ops.meshgrid(tensor1, tensor2)\n\n result = array_ops.stack([x, y], axis=-1)\n result = array_ops.reshape(result, [-1, 2])\n return result", "def pts_of_grid2d(x):\n N = len(x)\n pts1 = []\n for xx in x:\n for yy in x:\n pts1.append([xx, yy])\n return np.reshape(pts1, (N*N, 2))", "def pair_iter(mat1, mat2):\n\n assert_same_size(mat1, mat2)\n \n for (x, y), our_cell in mat1:\n other_cell = mat2.get_cell(x, y)\n yield (x, y), (our_cell, other_cell)", "def cartesian_sampling(nx,ny,rmax=1.):\n x = np.linspace(-1,1,nx);\n y = np.linspace(-1,1,ny);\n x,y=np.meshgrid(x,y); \n ind = x**2 + y**2 <= rmax**2;\n return x[ind],y[ind]", "def _p_iteritems_ ( self ) :\n N = len ( self )\n for i in range ( N ) :\n yield i , ( self.x ( i ) , self.y ( i ) )", "def by_coords(self) -> Tuple[RC, Color]:\n yield from ((RC(r, c), color)\n for r, row in enumerate(self)\n for c, color in enumerate(row))", "def ordinary_points(n):\n return [(x, y) for x in range(n) for y in range(n)]", "def iter_polygonal_numbers(r):\n a = 1\n b = 1\n c = r - 2\n while 1:\n yield a\n b += c\n a += b", "def pairs(l):\n for i in range(int(len(l) / 2)):\n yield l[2*i], l[2*i+1]", "def surrounding_tiles(self, coord):\n x, y = coord\n for ox in (x - 1, x, x + 1):\n for oy in (y - 1, y, y + 1):\n if self.in_bounds(ox, oy):\n yield ox, oy", "def all_pairs(arr):\n length = len(arr)\n for i in range(length - 1):\n for j in range(i + 1, length, 1):\n yield [arr[i], arr[j]]", "def _edge_coords_3d_iter(edges):\n for a, b in edges:\n yield (a.coord + tuple([int(a.floor)]), b.coord + tuple([int(b.floor)]))", "def _coords(self, x, y):\n return y, x * 2", "def getTupleOfAllCoordinates():\n return (coordinate.Coordinate(3, 7), coordinate.Coordinate(4, 7),\n coordinate.Coordinate(5, 7), coordinate.Coordinate(3, 6),\n coordinate.Coordinate(4, 6), coordinate.Coordinate(5, 6),\n coordinate.Coordinate(1, 5), coordinate.Coordinate(2, 5),\n coordinate.Coordinate(3, 5), coordinate.Coordinate(4, 5),\n coordinate.Coordinate(5, 5), coordinate.Coordinate(6, 5),\n coordinate.Coordinate(7, 5), coordinate.Coordinate(1, 4),\n coordinate.Coordinate(2, 4), coordinate.Coordinate(3, 4),\n coordinate.Coordinate(4, 4), coordinate.Coordinate(5, 4),\n coordinate.Coordinate(6, 4), coordinate.Coordinate(7, 4),\n coordinate.Coordinate(1, 3), coordinate.Coordinate(2, 3),\n coordinate.Coordinate(3, 3), coordinate.Coordinate(4, 3),\n coordinate.Coordinate(5, 3), coordinate.Coordinate(6, 3),\n coordinate.Coordinate(7, 3), coordinate.Coordinate(3, 2),\n coordinate.Coordinate(4, 2), coordinate.Coordinate(5, 2),\n coordinate.Coordinate(3, 1), coordinate.Coordinate(4, 1),\n coordinate.Coordinate(5, 1))", "def quadrant_two(\n pairs: tuple[tuple[float, float], ...]\n) -> tuple[tuple[float, float], ...]:\n return 
tuple(filter(lambda p: p[0] < 0 and p[1] >= 0, pairs))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the length of a word based on character width. If a letter is not found, a width of 9 is assumed. A character spacing of 1 is automatically integrated
def fontwidth(word): return sum([lookup.ASCIIPIXELS[letter] + 1 if letter in lookup.ASCIIPIXELS else 10 for letter in word]) - 1
[ "def w(i, j):\n global L\n\n width = 0\n for word in words[i: j + 1]:\n # length of a word + blank space\n width += len(word) + 1\n\n # remove last blank space\n width -= 1\n\n return width if 0 < width <= L else 0", "def _string_width(self, s):\r\n s = str(s)\r\n w = 0\r\n for i in s:\r\n w += self.character_widths[i]\r\n return w * self.font_size / 1000.0", "def get_width( o ):\n \"\"\"获取该字符在屏幕上的显示的长度\"\"\"\n global widths\n if o == 0xe or o == 0xf:\n return 0\n for num, wid in widths:\n if o <= chr(num):\n return wid\n return 1", "def string_width(string: str, widest: bool = True) -> int:\n def _widest():\n words = string.split('\\n')\n max_length = 0\n for w in words:\n length = len(w)\n if length > max_length:\n max_length = length\n return max_length\n\n def _last():\n found_newline = False\n width = 0\n for c in reversed(string):\n if c == '\\n' and found_newline:\n break\n if c == '\\n':\n found_newline = True\n continue\n if found_newline:\n width += 1\n return width\n\n return _widest() if widest else _last()", "def width(self):\n\n\t\treturn self.fontsize / 2 * len(self.text)", "def word_dimension(word, font):\n # \"display\" is a \"appuifw.Canvas\" instance associated to the global \"appuifw.app.body\". The \"measure_text\" method first return value contains the coordinates of the smallest rectangle surrounding the text given as the first argument (here word) with the font given as the second argument.\n measure = display.measure_text(word, font)\n # We return the width and height of the surrounding rectangle.\n return (measure[0][2] - measure[0][0] + 1, measure[0][3] - measure[0][1] + 1)", "def columnWidth(string):\n if app.config.strict_debug:\n assert isinstance(string, unicode)\n width = 0\n for i in string:\n width += charWidth(i, width)\n return width", "def count_chars(self, text):\r\n return len(text) - text.count(\" \")", "def my_wc(s):\n\n c = len(s)\n l = len(s.splitlines())\n w = len(s.split())\n\n return '%d\\t%d\\t%d\\n' % (l, w, c)", "def textWidth(data):\n label = pyglet.text.Label(data,\n x=0, y=0,\n anchor_x=textAlignConst[attrib.textAlign[0]],\n anchor_y=textAlignConst[attrib.textAlign[1]],\n **attrib.font)\n return label.content_width", "def ansi_len(string):\n return len(string) - wcswidth(re.compile(r'\\x1b[^m]*m').sub('', string))", "def fitToRenderedWidth(column, width, string):\n if app.config.strict_debug:\n assert isinstance(width, int)\n assert isinstance(string, unicode)\n indexLimit = len(string)\n index = 0\n for i in string:\n cols = charWidth(i, column)\n width -= cols\n column += cols\n if width < 0 or index >= indexLimit:\n break\n index += 1\n return index", "def word_spacing(computer, name, value):\r\n if value == 'normal':\r\n return 0\r\n else:\r\n return length(computer, name, value, pixels_only=True)", "def print_word(word, lineCharCount):\n if len(word) + lineCharCount > 70:\n print \"\\n\" + word,\n return len(word) + 1\n else:\n print word,\n return len(word) + 1 + lineCharCount", "def text_word_wrap(text, width):\n return textwrap.wrap(text, width)", "def scale_to_word_width(img, word, limit):\n width = min(limit, len(word) * average_char_width)\n\n return scale_to_width(img, width)", "def set_string_width(string, width):\n if len(string) >= width:\n return string[:width]\n else:\n num_spaces = width - len(string)\n left = False\n for space in range(0, num_spaces):\n if left:\n string = \" \" + string\n else:\n string += \" \"\n left = not left\n\n return string", "def printed_length(s):\n ansi_escape = re.compile(r'\\x1b[^m]*m')\n 
return len(ansi_escape.sub('', s))", "def check_chars(text):\n\n lenght = len(text)\n return lenght" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Place a lectern with a book in the world.
def placeLectern(x, y, z, bookData, worldModif, facing="east"): # worldModif.setBlock(x, y, z, f"lectern[facing={facing}, has_book=true]") # _utils.addBookToLectern(x, y, z, bookData) """**Place a lectern with a book in the world**.""" if facing is None: facing = choice(getOptimalDirection(x, y, z)) worldModif.setBlock(x, y, z, f"lectern[facing={facing}, has_book=true]", placeImmediately=True) command = (f'data merge block {x} {y} {z} ' f'{{Book: {{id: "minecraft:written_book", ' f'Count: 1b, tag: {bookData}' '}, Page: 0}') response = runCommand(command) if not response.isnumeric(): print(f"{lookup.TCOLORS['orange']}Warning: Server returned error " f"upon placing book in lectern:\n\t{lookup.TCOLORS['CLR']}" f"{response}")
[ "def create_book(self, title, ident):\n\n new_book = item.Book(title, ident)\n\n self.library_controller.add_item(new_book)", "def crewtut():\n\n from pirates.tutorial.CrewTutorial import CrewTutorial\n spellbook.getManager().crewTutorial = CrewTutorial()", "def book():\n update_calendar.update_calendars()\n campus = info.get_campus()\n if campus != None:\n if campus == 'jhb':\n data = display_calendar.get_jhb_data()\n elif campus == 'cpt':\n data = display_calendar.get_cpt_data()\n start = info.get_start_time()\n email = info.get_email()\n if start != False:\n if is_volunteer_available(data, start, email):\n volunteer_email = get_volunteer_email(data, start)\n event_id = get_event_id(data, start)\n service = token_tool.load_app_token()\n calendar_id = info.get_calendar_id()\n topic = get_topic(data, start)\n random_id = create_random_id()\n add_patient(service, calendar_id, event_id, \\\n volunteer_email, email, random_id)\n print(''' Successfully booked slot with following details:\\n\n\\tCampus : {}\n\\tDate : {}\n\\tTime : {}\n\\tTopic : {}\n\\tVolunteer: {}'''.format(campus, start[:10], start[11:16], topic, volunteer_email))\n else:\n print(error_span+\n 'The time slot at {} {} is not available to volunteer.'\\\n .format(start[:10], start[11:16]))\n print('\\n\\tPlease use \\33[100m./cc calendar\\33[0m \\\ncheck calendar.')", "def create_book(book_data: tuple) -> Book:\n author = input(\"Enter author name: \")\n new_book = Book(book_data[0], book_data[1], book_data[2], author)\n return new_book", "def add_book(name):\n if check_book_in_library(name):\n msg = f\"{name} is already in the Library\"\n else:\n add_book_to_library(name)\n msg = f\"{name} added to the Library\"\n click.echo(msg)", "def display_book(book_name):\n\tprint(\"One of my favourite books is \" + book_name + \".\")", "def tell_story(self):\n name = self.name_entry.get()\n verb = self.verb_entry.get()\n noun = self.noun_entry.get()\n\n places = \"\"\n if self.castle.get():\n places += \"castle, \"\n if self.mountain_temple.get():\n places += \"mountain temple, \"\n if self.beach_cottage.get():\n places += \"beach cottage. 
\"\n\n # create the story\n story = \"There was a princess called \"\n story += name\n story += \" She lived in a \"\n story += places\n story += \"where she \"\n story += verb\n story += \" and keep \"\n story += noun\n\n # show the story\n self.tell_story_txt.delete(0.0, END)\n self.tell_story_txt.insert(0.0, story)", "def add_book():\n return render_template(\"add_book.html\")", "def AddBook(self, book):\n thickness = book.GetThickness()\n if self.__available_capacity >= thickness:\n self.__books[book.GetTitle()] = book\n self._ReduceCapacity(thickness)\n else:\n raise RuntimeError(\"Add failed: No space available on shelf.\")", "def add_book_to_db(book: dict) -> None:\n if \"title\" in book:\n title = request.form['title']\n else:\n title = \"\"\n\n if \"authors\" in book:\n authors = \";\\n\".join(request.form['authors'].split(';'))\n else:\n authors = \"\"\n\n if \"publishedDate\" in book:\n published_date = request.form['publishedDate']\n else:\n published_date = \"\"\n\n if \"\" in book:\n industry_identifiers = request.form['industryIdentifiers']\n single_identifiers = industry_identifiers.split(';')\n industry_identifiers = \";\\n\".join([f\"{i.split(',')[0]}({i.split(',')[1]})\\n\" for i in single_identifiers])\n else:\n industry_identifiers = \"\"\n\n page_count = request.form['pageCount']\n links = \";\\n\".join(request.form['links'].split(','))\n languages = \";\\n\".join(request.form['languages'].split(','))\n\n book = Book(title=title,\n authors=authors,\n publishedDate=published_date,\n industryIdentifiers=industry_identifiers,\n pageCount=page_count,\n imageLinks=links,\n language=languages\n )\n\n DATABASE.session.add(book)\n DATABASE.session.commit()", "def addLabbook(self, name, data=None):\n if not hasattr(self.meta, 'labbooks'):\n self.meta.labbooks = PersistentMapping()\n self.meta.labbooks[name] = data\n return", "def add_book(self, book: Book):\n self.books.append(book)", "def tell_story(self):\n # get values from the GUI\n person = self.person_ent.get()\n noun = self.noun_ent.get()\n verb = self.verb_ent.get()\n adjectives = \"\"\n if self.is_itchy.get():\n adjectives += \"нетерпеливое, \"\n if self.is_joyous.get():\n adjectives += \"радостное, \"\n if self.is_electric.get():\n adjectives += \"пронизывающее, \"\n body_part = self.body_part.get()\n\n # create the story\n story = \"Изветсный исследователь \"\n story += person\n story += \" уже отчаялся завершить дело своей жизни - поиск затерянного города \"\n story += noun.title()\n story += \" пока в один день \"\n story += noun\n story += \" не нашел \"\n story += person + \". \"\n story += \"Мощное \"\n story += adjectives\n story += \"ни с чем не сравнимое чувство. \"\n story += \"После стольких лет поиска цель наконец была достигнута \"\n story += person\n story += ' ощутил как на его ' + body_part + \" скатилась слеза. \"\n story += \" Затем \"\n story += noun\n story += \" перешли в атаку \"\n story += person + \". \"\n story += \" Мораль истории? 
Если задумали\"\n story += verb\n story += \" будьте осторожны.\"\n\n # display the story\n self.story_txt.delete(0.0, END)\n self.story_txt.insert(0.0, story)", "def __on_add_click(self):\n title = self.__title_txt.text()\n writer = self.__writer_txt.text()\n genre = self.__genre_txt.text()\n try:\n year = int(self.__year_txt.text())\n self.__srv.add_book(title, writer, genre, year)\n self.__set_table(self.__srv.get_all_srv())\n except ValidatorException as ex:\n string = concatenate(ex.get_errs())\n QMessageBox.warning(self, \"Atentie\", string, QMessageBox.Ok)\n except ValueError:\n QMessageBox.warning(self, \"Atentie!\",\"Campul An trebuie sa contina un numar intreg!\", QMessageBox.Ok)", "def lend_book(name):\n if check_book_in_library(name):\n if check_if_lent(name):\n msg = f\"{name} is already lent, wait for it to get returned\"\n else:\n date = lend_book_from_library(name)\n msg = f\"{name} lent until {date}\"\n else:\n msg = f\"{name} is not in the Library\"\n click.echo(msg)", "def placeFurniture(furn, x, y):\n if(furn not in furniture):\n furniture[(x,y)] = furn\n\n pass", "def create_ride():", "def add_room():\n print(\"ADD A ROOM\".center(80))\n print(\"-\".center(80, '-'))\n room = str(_get_room_name())\n db.execute('INSERT into room (name) VALUES (?)', (room,))\n db.commit()\n display_menu()", "def _place_piece(self, position, piece):\n self._positions[str(position)].piece = piece" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Place an inventory block with any number of items in the world. Items is expected to be a sequence of (x, y, item[, amount]) or a sequence of such sequences, e.g. ((x, y, item), (x, y, item), ...)
def placeInventoryBlock(x, y, z, block='minecraft:chest', facing=None, items=[]): if block not in lookup.INVENTORYLOOKUP: raise ValueError(f"The inventory for {block} is not available.\n" "Make sure you are using the namespaced ID.") dx, dy = lookup.INVENTORYLOOKUP[block] if facing is None: facing = choice(getOptimalDirection(x, y, z)) gi.placeBlock(x, y, z, f"{block}[facing={facing}]") # we received a single item if 3 <= len(items) <= 4 and type(items[0]) == int: items = [items, ] response = '0' for item in items: slot = index2slot(item[0], item[1], dx, dy) if len(item) == 3: item = list(item) item.append(1) response = runCommand(f"replaceitem block {x} {y} {z} " f"container.{slot} {item[2]} {item[3]}") if not response.isnumeric(): print(f"{lookup.TCOLORS['orange']}Warning: Server returned error " f"upon placing items:\n\t{lookup.TCOLORS['CLR']}{response}")
[ "def place_spawn_items(width, height, cell_size, xmin, ymin, punishement=True):\n #Still the original function\n #Initialize\n item_start = 20;\n num_items = 4\n output=[]\n items = []\n locations=[]\n punishements=[]\n \"\"\"\n #Place spawn at random\n spawn_i = random.randint(0, width-1)\n spawn_j = random.randint(0, height-1) \n locations.append((spawn_i,spawn_j))\"\"\"\n #Place spawn not randomly\n spawn_i = 0\n spawn_j = height//2\n \n #Generate output\n spawn = (xmin + spawn_i*cell_size + cell_size/2, ymin + spawn_j*cell_size + cell_size/2)\n \n #this chooses which items \n item_tids = [2018, 2019, 2012, 2013]\n colors = ['g','r', 'b', 'c']\n\n #let's place the items\n for i in range(num_items):\n item_i = random.randint(0, width-1)\n #item_j = random.randint(0, height-1)\n item_j = random.randint(0, 1)*(height-1)\n \n #This avoid items superposition\n while (item_i, item_j) in locations:\n item_i = random.randint(0, width-1)\n item_j = random.randint(0, 1)*(height-1) \n locations.append((item_i, item_j))\n \n #add in the wad output\n item_x = xmin + item_i*cell_size + cell_size/2\n item_y = ymin + item_j*cell_size + cell_size/2\n output += create_object(item_x, item_y, item_tids[i], item_start + i)\n \n #add the item\n items.append((item_x, item_y, colors[i]))\n \n #let's place the punishement items\n for i in range(width):\n #place in upper combs\n for j in range(height//2):\n if not (0,j) in locations:\n idx=100*len(punishements)+2-j%((height//2)/2)\n item_x = xmin + i*cell_size + cell_size/2\n item_y = ymin + j*cell_size + cell_size/2\n tid=2013\n output += create_object(item_x, item_y, tid, idx) # unvisible=True)\n items.append((item_x, item_y, colors[2]))\n #place in lower combs\n for j in range(h//2+1,height):\n if not (width, j) in locations:\n idx=100*len(punishements)+(j-height//2)%((height-height//2)/2)\n item_x = xmin + i*cell_size + cell_size/2\n item_y = ymin + j*cell_size + cell_size/2\n tid=2013\n output += create_object(item_x, item_y, tid, idx) # unvisible=True)\n items.append((item_x, item_y, colors[2]))\n \n return items, output, spawn", "def place_items(level):\n ether = Item(Tools.ETHER.value)\n needle = Item(Tools.NEEDLE.value)\n plastic_tube = Item(Tools.TUBE.value)\n ether.place(level)\n needle.place(level)\n plastic_tube.place(level)", "def add_item(self,items: list):\n # get list of id's\n item_ids = self.get_inv_ids()\n\n for item in items:\n # first check if similar item is in inventory\n if item.base_identifier in item_ids:\n # check for stacking\n\n # get index of match\n item_index = item_ids.index(item.base_identifier)\n\n #get item from index\n matched_item = self.inventory[item_index]\n\n can_stack = matched_item.allow_stack\n if can_stack:\n # add amount to stack\n matched_item.amount += item.amount\n if matched_item.amount <= 0:\n self.inventory.remove(matched_item)\n # for the case where the id is there, but cannot stack, no item is added\n\n\n else:\n # new item, add to inventory and add it's base identifier to the list, if copies\n # will be added to, like multiple\n self.inventory.append(item)\n item_ids.append(item.base_identifier)\n if item.amount <= 0:\n self.inventory.remove(item)", "def __replace(self, coords, high, from_ids, blocks):\r\n \r\n blocks = ancillary.extend(blocks)\r\n for y in xrange(coords[2], coords[2] + high, numpy.sign(high)):\r\n xzy = (coords[0], coords[1], y)\r\n if not self.__inchunk(xzy):\r\n return\r\n if from_ids is None or int(self.__local_ids[xzy]) in from_ids:\r\n if int(self.__local_ids[xzy]) not in 
self.__block_roles.immutable: # Leave immutable blocks alone!\r\n self.__place(xzy, blocks.next())", "def gather_item(self, i):\n self.inventory[i.name] = [i.x, i.y]", "def generateItem(self, platform):\n blockItem = Block(IMG_PATH_BLOCK, platform.x+platform.w/2-15, platform.y-50, (30, 30))\n healthItem = Health(platform.x+platform.w/2-15, platform.y-50, (30, 30))\n coinItem = Coin(platform.x+platform.w/2-15, platform.y-50, (30, 30))\n self.otherMovableObjects.append(choice([healthItem, coinItem, blockItem]))", "def equip(self,item,slot_number):\n self.inventory[slot_number] = item", "def placeBlock(self):\n mousex, mousey = pygame.mouse.get_pos()\n events = pygame.event.get()\n if pygame.mouse.get_pressed()[0] and len(self.pickedUpBlocks) > 0 and self.canClick:\n placedBlock = Block(IMG_PATH_BLOCK, mousex, mousey, (100, 20))\n self.platforms.append(placedBlock)\n self.pickedUpBlocks.remove(self.pickedUpBlocks[0])\n self.changingTexts[1] = self.renderText(str(len(self.pickedUpBlocks)), COOL_FONT, (WIDTH/11*10.1, HEIGHT/10), BLACK, 25)\n placedBlock.placed = True\n self.canClick = False\n elif not pygame.mouse.get_pressed()[0]:\n self.canClick = True", "def __place(self, coords, block):\r\n \r\n self.__local_ids[coords], self.__local_data[coords] = self.__block2pair(block)", "def place_objects(self, blocklist:List[Block], block_positions:List[Pose]=None):\n print(\"placing blocks\")\n # handle case where nothing is specified\n if len(blocklist) == 0:\n print(\"Blocklist empty, returning immediately\")\n return\n\n # handle cases where there's a length/position mismatch\n if block_positions is None or len(block_positions) < len(blocklist):\n if block_positions is None:\n block_positions = []\n else:\n print(\"You've specified more blocks than block positions. Some random positions will be selected for you.\")\n for _ in blocklist:\n print(\"Randomly generating positions\")\n pos = self.generate_valid_table_position(block_positions)\n block_positions.append(pos)\n if len(block_positions) > len(blocklist):\n print(\"You've specified more block positions than blocks. Some positions will be ignored.\")\n\n # we have some z values to fix based off of the block sizes, and we'll\n # assume all theta's should be 0 and then we actually get to work\n # placing that stuff\n for n in range(0, len(blocklist)):\n if blocklist[n] == Block.NONE:\n continue\n block_positions[n].z = self.TABLE_HEIGHT + block_size_data[blocklist[n]].height/2\n block_positions[n].theta = 0\n thisblock = BlockObject(self.client, Pose(block_positions[n].x, block_positions[n].y, block_positions[n].z), blocklist[n])\n self.blocks.append(thisblock)\n # make sure large block starts as base of tower\n if thisblock.btype == Block.LARGE:\n BLOCKTOWER.clear()\n BLOCKTOWER.append(thisblock)\n p.stepSimulation()", "def blocked_items(self, blocked_items):\n\n self._blocked_items = blocked_items", "def placeBlockAtPosition(layer, position):\n #SET UP VARIABLES\n currentPosition = Robot.getPosArgsCopy() #Back up the position in case of error\n heightForLayer = {\"1\": -32, \"2\": -15, \"3\": 3, \"4\": 24, \"5\": 40.5}\n print \"placeBlockAtPosition(\", locals().get(\"args\"), \"): Current pos: \", currentPosition\n\n #PICK THE RELEVANT MOVEMENTS FOR EACH LAYER/POSITION, AND PERFORM IT\n if not layer % 2: #IF THIS IS THE VERTICAL LAYER\n print \"PLACING DOWN VERTICAL BLOCK ON POSITION \", position, \"ON LAYER \", layer\n #HOW MUCH STRETCH / ROTATION IS NECESSARY FOR EACH POSITION. 
[POSITION]\n rotToMoveSide = [19, 0, -19]\n rotToMoveSlip = [-12, 0, 10]\n stretchToMove = [-2, 0, -2] #Adjust the angle so it is truly vertical.\n wristToMove = [-10, 0, 10]\n Robot.moveTo(height = heightForLayer[str(layer)] + 34, wrist = wristToMove[position], relative = False) #Move Wrist BEFORE going all the way down, or else you might knock some blocks\n Robot.moveTo(rotation = rotToMoveSide[position], stretch = stretchToMove[position]) #Go besides (but offset) to the end position of the block\n sleep(.1)\n Robot.moveTo(height = heightForLayer[str(layer)], relative = False) #Finish going down\n waitTillStill()\n Robot.moveTo(rotation = rotToMoveSlip[position] * .75) #Slip block in to the correct position sideways (Hopefully pushing other blocks into a better position\n sleep(.1)\n Robot.moveTo(rotation = rotToMoveSlip[position] * .25) #Slip block in to the correct position sideways (Hopefully pushing other blocks into a better position\n\n else: #IF THIS IS THE HORIZONTAL LAYER\n stretchToMoveSide = [-45, 0, 49] #To get parallel to the part\n stretchToMoveSlip = [25, 0, -25] #To slip it in all the way\n wristToMove = 73.5 #Equivalent to 90 degrees, for some reason\n Robot.moveTo(height = heightForLayer[str(layer)] + 34, wrist = wristToMove, relative = False) #Move Wrist BEFORE going all the way down, or else you might knock some blocks\n Robot.moveTo(stretch = stretchToMoveSide[position])\n sleep(.1)\n Robot.moveTo(height = heightForLayer[str(layer)], relative = False) #Finish going down\n waitTillStill()\n Robot.moveTo(stretch = stretchToMoveSlip[position] * .75) #Ease into the correct position by splitting it into two moves\n sleep(.1)\n Robot.moveTo(stretch = stretchToMoveSlip[position] * .25)\n\n #DROP BRICK AND GO BACK TO ORIGINAL POSITION\n sleep(.1)\n print Robot.pos\n Robot.setGrabber(0)\n Robot.moveTo(height = currentPosition[\"height\"], relative = False)\n sleep(.2)\n Robot.moveTo(relative = False, **currentPosition)\n waitTillStill()", "def animate_question_block(self):\n timer = pygame.time.get_ticks()\n if self.is_hit and (self.item.type == 1 or self.item.type == 2 or self.item.type == 3):\n # If the block is hit and the item is NOT a coin, the item slowly rises up\n rise_until = 12 # Size of most sprites, may need to change\n drop_until = 24\n item_show = 50\n item_rise_until = 55\n if self.animation_px_counter < rise_until:\n self.rect.y -= 2\n elif self.animation_px_counter < drop_until:\n self.rect.y += 2\n elif self.animation_px_counter < item_rise_until:\n self.item.rect.y -= 1\n if self.animation_px_counter > item_show:\n self.item_active = True\n self.animation_px_counter += 1\n self.image = self.images[4]\n elif self.is_hit and self.item.type == 4:\n # If the block is hit and the item is a coin, the coin jumps up and falls back down\n rise_until = 12 # 32 because the coin jumps pretty high\n drop_until = 24\n item_show = 45\n item_rise_until = 98\n item_drop_until = 146\n self.block_frame = 4\n if self.animation_px_counter < rise_until:\n self.rect.y -= 2 # Arbitrary value\n self.animation_px_counter += 1\n elif self.animation_px_counter < drop_until:\n self.rect.y += 2\n self.animation_px_counter += 1\n elif self.animation_px_counter < item_rise_until:\n # Give time to let item image rise above block; could potentially change starting position higher\n self.item.rect.y -= 1\n if self.animation_px_counter > item_show:\n self.item_active = True\n self.animation_px_counter += 1\n elif self.animation_px_counter < item_drop_until:\n self.item.rect.y += 1\n 
self.animation_px_counter += 1\n elif self.animation_px_counter >= item_drop_until:\n self.item_active = False\n self.image = self.images[4]\n else:\n self.image = self.images[(int(timer / 200) % 4)]", "def draw_inventory(self, inventory):\r\n self.screen.blit(self.inventory, (480, 0))\r\n for i in range(len(inventory.get_items())):\r\n display = self.small_font.render(inventory.get_items()[i], True, WHITE, BLACK)\r\n self.screen.blit(display, (480, TILE_SIZE//2*(i+1)))", "def gen_block(self):\n new_block = random.randrange(7)\n self.center_block = [4, 4]\n if new_block == 0:\n # Z Block\n self.falling_blocks = [[4, 4], [4, 5], [3, 4], [3, 3]]\n elif new_block == 1:\n # L Block\n self.falling_blocks = [[4, 4], [4, 3], [4, 5], [3, 5]]\n elif new_block == 2:\n # O Block\n self.falling_blocks = [[4, 4], [4, 5], [3, 4], [3, 5]]\n elif new_block == 3:\n # S Block\n self.falling_blocks = [[4, 4], [4, 3], [3, 4], [3, 5]]\n elif new_block == 4:\n # I Block\n self.falling_blocks = [[4, 4], [4, 3], [4, 5], [4, 6]]\n elif new_block == 5:\n # J Block\n self.falling_blocks = [[4, 4], [4, 5], [4, 3], [3, 3]]\n elif new_block == 6:\n # T Block\n self.falling_blocks = [[4, 4], [4, 3], [4, 5], [3, 4]]\n stop = False\n for block in self.falling_blocks:\n if self.grid[block[0]][block[1]][0] == 1:\n stop = True\n if stop:\n self.done = True\n else:\n for block in self.falling_blocks:\n self.grid[block[0]][block[1]] = [-1, BLOCK_SPRITES[new_block]]", "def inventory_insert(item_name):\n\n if item_name in Player.Inventory and equip_check(item_name):\n Player.Inventory[item_name][Item.count] += 1\n\n else:\n kv_pair = copy.deepcopy({item_name : Items[item_name]})\n Player.Inventory.update(kv_pair)\n Player.Inventory[item_name][Item.count] += 1", "def PushItem(self, item, block=True):\n raise errors.WrongQueueType", "def add_hay(x, y, item):\r\n s = 0\r\n for s in range(0, len(existing_farms)):\r\n ''' This code below will make sure hay bales dont spawn on top of eachother'''\r\n if x > existing_farms[s][0] and x < existing_farms[s][1] and y > existing_farms[s][2] and y < existing_farms[s][3]:\r\n add_hay(randint(50, 1150), randint(50, 650), haybail)\r\n return None\r\n main_canvas.create_image(x, y, image=item, anchor=NW)\r\n main_canvas.create_image(x, y, image=item, anchor=NW)", "def equip_item(self, key):\n\t\tself.player.begin_player_equip_item()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Place a written sign in the world. Facing is for wall placement, rotation for ground placement. If there is no supporting wall, the sign will revert to ground placement. By default, the sign will attempt to orient itself to be most legible
def placeSign(x, y, z, facing=None, rotation=None, text1="", text2="", text3="", text4="", wood='oak', wall=False): if wood not in lookup.WOODS: raise ValueError(f"{wood} is not a valid wood type!") if facing is not None and facing not in lookup.DIRECTIONS: print(f"{facing} is not a valid direction.\n" "Working with default behaviour.") facing = None try: if not 0 <= int(rotation) <= 15: raise TypeError except TypeError: if rotation is not None: print(f"{rotation} is not a valid rotation.\n" "Working with default behaviour.") rotation = None if facing is None and rotation is None: facing = getOptimalDirection(x, y, z) if wall: wall = False for direction in facing: inversion = lookup.INVERTDIRECTION[direction] dx, dz = lookup.DIRECTIONTOVECTOR[inversion] if getBlock(x + dx, y, z + dz) in lookup.TRANSPARENT: break wall = True gi.placeBlock( x, y, z, f"{wood}_wall_sign[facing={choice(facing)}]") if not wall: if rotation is None: reference = {'north': 0, 'east': 4, 'south': 8, 'west': 12} if len(facing) == 1: rotation = reference[lookup.INVERTDIRECTION[facing[0]]] else: rotation = 0 for direction in facing: rotation += reference[lookup.INVERTDIRECTION[direction]] rotation //= 2 if rotation == 6 and 'north' not in facing: rotation = 14 if rotation % 4 != 2: rotation = reference[facing[0]] gi.placeBlock(x, y, z, f"{wood}_sign[rotation={rotation}]") data = "{" + f'Text1:\'{{"text":"{text1}"}}\',' data += f'Text2:\'{{"text":"{text2}"}}\',' data += f'Text3:\'{{"text":"{text3}"}}\',' data += f'Text4:\'{{"text":"{text4}"}}\'' + "}" runCommand(f"data merge block {x} {y} {z} {data}")
[ "def build_entity(raw_sign_image, sign_rect, sign_data, x, y):\t#not sure if I can use sign_key-- might need a different structure to figure out the proper text\n\t\tstill_entity_image = GameImage.still_animation_set(raw_sign_image, sign_rect)\n\t\tsign = Sign(still_entity_image, x, y)\n\t\tsign_text_panes = sign_data.text_panes\n\t\tsign.set_text_set(sign_text_panes)\n\t\treturn sign", "def place_pin(self):\n\n pin = turtle.Turtle()\n pin.penup()\n pin.color(self.color) # Set the pin to user's chosen color\n pin.shape(\"circle\") # Sets the pin to a circle shape\n\n # Logically, the denominator for longitude should be 360; lat should be 180.\n # These values (195 and 120) were determined through testing to account for\n # the extra white space on the edges of the map. You shouldn't change them!\n pin.goto(self.coordinates[0], self.coordinates[1])\n pin.stamp() # Stamps on the location\n text = \"{0}'s place:\\n {1}\".format(self.person, self.name) # Setting up pin label\n pin.write(text, font=(\"Arial\", 10, \"bold\"))", "def sign(self, *signers: Keypair) -> None:\n self.sign_partial(*signers)", "def set_sign(self, symbol):\n if symbol == \"-\":\n self.sign = symbol", "def signed_movement(self, signed_movement):\n\n self._signed_movement = signed_movement", "def update_signs(self,signs):\n # Throw error if tensor is not loaded\n if not self.in_mem: raise ValueError('GEN_TEN not in memory for operation flip_signs')\n\n # Change signs for symtensor\n if self.sym is not None:\n self.sym[0] = signs\n self.ten.sym[0] = signs", "def stay_put(self):\n self.go_to(self.pos.x,self.pos.y, self.pos.theta)", "def add_L(self, space):\r\n rotation_center_body = pm.Body(body_type=pm.Body.STATIC)\r\n rotation_center_body.position = (300, 300)\r\n\r\n rotation_limit_body = pm.Body(body_type=pm.Body.STATIC)\r\n rotation_limit_body.position = (300, 300)\r\n body = pm.Body(650, 650)\r\n body.position = (300, 600)\r\n l1 = pm.Segment(rotation_limit_body, (25, -20), (25, 200.0), 1.0)\r\n l2 = pm.Segment(rotation_limit_body, (-25, -20), (-25, 200.0), 1.0)\r\n\r\n l3 = pm.Segment(rotation_limit_body, (25, -20), (85.0, -90.0), 1.3)\r\n l4 = pm.Segment(rotation_limit_body, (-25, -20), (-85.0, -90.0), 1.3)\r\n l8 = pm.Segment(body, (-250, -200), (200.0, -200.0), 5.0)\r\n\r\n rotation_center_joint = pm.PinJoint(body, rotation_center_body, (0, -200), (0, -200))\r\n joint_limit = .03\r\n rotation_limit_joint = pm.SlideJoint(body, rotation_limit_body, (-20, -200), (0, -200), 0, joint_limit)\r\n\r\n space.add(l1, l2, l3, l4, l8, rotation_limit_body, rotation_center_body, body, rotation_center_joint,\r\n rotation_limit_joint)\r\n return l8", "def draw_treasure(self, location):\r\n self.screen.blit(self.treasure, location)", "def __sign_(self):\n arr, json_str = self.serialize()\n salt = self.salt + self.__random_str_\n\n return Cipher.encrypt(Signer.sign(json_str, self.__secret_key_), self.secret_key, salt)", "def solid(self):\n return RotatedShape(shape_in=self.endplate.solid,\n rotation_point=self.position.point,\n vector=self.main[0].surface.position.orientation.Vx,\n angle=radians(-self.cant),\n label=\"right_side\",\n hidden=self.hide)", "def zodiac_sign(sign_num):\n global COLUMNS\n CAPRICORN = 119\n AQUARIUS = 218\n PISCES = 320\n ARIES = 419\n TAURUS = 520\n GEMINI = 620\n CANCER = 722\n LEO = 822\n VIRGO = 922\n LIBRA = 1022\n SCORPIO = 1121\n SAGGITARIUS = 1221\n\n if sign_num <= CAPRICORN:\n print(\"You are a CAPRICORN. 
That is a good sign!\")\n sign = \"CAPRICORN\"\n if sign_num > SAGGITARIUS:\n print(\"You are a CAPRICORN. That is a good sign!\")\n sign = \"CAPRICORN\"\n elif CAPRICORN < sign_num <= AQUARIUS:\n print(\"You are a AQUARIUS. That is a good sign!\")\n sign = \"AQUARIUS\"\n elif AQUARIUS < sign_num <= PISCES:\n print(\"You are a PISCES. That is a good sign!\")\n sign = \"PISCES\"\n elif PISCES < sign_num <= ARIES:\n print(\"You are a ARIES. That is a good sign!\")\n sign = \"ARIES\"\n elif ARIES < sign_num <= TAURUS:\n print(\"You are a TAURUS. That is a good sign!\")\n sign = \"TAURUS\"\n elif TAURUS < sign_num <= GEMINI:\n print(\"You are a GEMINI. That is a good sign!\")\n sign = \"GEMINI\"\n elif GEMINI < sign_num <= CANCER:\n print(\"You are a CANCER. That is a good sign!\")\n sign = \"CANCER\"\n elif CANCER < sign_num <= LEO:\n print(\"You are a LEO. That is a good sign!\")\n sign = \"LEO\"\n elif LEO < sign_num <= VIRGO:\n print(\"You are a VIRGO. That is a good sign!\")\n sign = \"VIRGO\"\n elif VIRGO < sign_num <= LIBRA:\n print(\"You are a LIBRA. That is a good sign!\")\n sign = \"LIBRA\"\n elif LIBRA < sign_num <= SCORPIO:\n print(\"You are a SCORPIO. That is a good sign!\")\n sign = \"SCORPIO\"\n elif SCORPIO < sign_num <= SAGGITARIUS:\n print(\"You are a SAGGITARIUS. That is a good sign!\")\n sign = \"SAGGITARIUS\"\n\n if int(COLUMNS) > 140:\n ascii_art(sign.lower())", "def set_rotation(self, rotation: float):", "def placer():\n\n base.camera.place()\n return 'Toggled placer.'", "def sign(self):\n self.__signature_ = self.__sign_()\n return self.serialize(signature=self.__signature_)", "def create_graphic(self):\n x, y = self.coords\n self.graphic_id = self.world.create_arc(x - Entity.RADIUS, y - Entity.RADIUS,\n x + Entity.RADIUS, y + Entity.RADIUS,\n # A little mouth\n start=self.heading + self.mouth_angle / 2,\n extent= 360 - self.mouth_angle,\n fill=self.color, outline=self.outline)", "def sign(key, file, output, clearsign=False):\n signopt = \"--clearsign\" if clearsign else \"--detach-sign\"\n GPG(signopt, \"--armor\", \"--default-key\", key, \"--output\", output, file)", "def __adjust_sign(self,hemi):\n pos = ['N','E']\n neg = ['S','W']\n if hemi:\n if hemi.upper() in neg:\n self.degrees = -abs(self.degrees)\n elif hemi.upper() in pos:\n self.degrees = abs(self.degrees)\n else:\n raise ValueError( \"Hemisphere should be N, S, E, or W. Why are you giving me %s?\" % (hemi,) )", "def pseudoinstr_signature(self):\n # Expect addresses in registers r29 and r28.\n sigbot = self.cpu._rbank[29]\n sigtop = self.cpu._rbank[28]\n bounds = (sigbot, sigtop)\n self.write_signature(bounds)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the perceived obtrusiveness of a given block. Returns a numeric weight from 0 (invisible) to 4 (opaque)
def identifyObtrusiveness(blockStr):
    if blockStr in lookup.INVISIBLE:
        return 0
    if blockStr in lookup.FILTERING:
        return 1
    if blockStr in lookup.UNOBTRUSIVE:
        return 2
    if blockStr in lookup.OBTRUSIVE:
        return 3
    return 4
[ "def get_blocker_wind_efficiency(self, index_of_the_star):\n return self.get_control(index_of_the_star,'blocker_scaling_factor')", "def calc_shield_power(block_count, active=False):\n if active:\n return block_count * 55.0 \n else:\n return block_count * 5.5", "def get_weight(self) -> float:\n return 0", "def calc_thrust(block_count):\n return pow(block_count * 5.5, 0.87) * 0.75", "def cell_per_well(self) -> float:\n return self.cell_per_surface * self.well_surface", "def thermal_conductivity(self):\n return 70.0 * units.watt / (units.meter * units.kelvin)", "def calc_shield_capacity(block_count):\n return pow(block_count, 0.9791797578) * 110.0 + 220.0", "def productivityMultiplier(self) -> float:\n return self._getMultiplier('productivity')", "def calc_shield_recharge(block_count):\n return block_count * 5.5", "def calc_thrust_power(block_count):\n return block_count / 0.03", "def neg_sharpe_ratio(weights,riskfree_rate,er,cov):\r\n r=portfolio_return(weights,er)\r\n vol=portfolio_vol(weights,cov)\r\n return -(r-riskfree_rate)/vol", "def partial_pressure(self):\n return self.percentage * self.planetary_body.surface_pressure", "def bmi(weight, height):\n index = weight / (height * height)\n return round(index, 2)", "def getWeightedValue():\n\t\tweight*value", "def shear(self):\r\n return (self.shear_Voigt + self.shear_Reuss) / 2", "def __get_damage_blocked(self) -> int:\n return int(self.raw_damage * self.target.damage_reduction)", "def effect_size(self):\n return 1 - self.rates_ratio", "def colorfulness(im):\n rg = im[:, :, 2]-im[:, :, 1]\n yb = .5 * (im[:, :, 2] + im[:, :, 1]) - im[:, :, 0]\n try:\n sigma_rgyb = math.sqrt(rg.std()**2 + yb.std()**2)\n except:\n print('uh oh...')\n print(im.shape)\n print(im.compressed())\n print(np.sum(im.mask))\n mu_rgyb = math.sqrt(rg.mean()**2 + yb.mean()**2)\n return sigma_rgyb + .3 * mu_rgyb", "def _filter(self, observation):\n return observation.feature_screen.unit_hit_points_ratio / 255.0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process a text and create a Doc object.
def process_text(model_name: str, text: str) -> spacy.tokens.Doc:
    nlp = load_model(model_name)
    return nlp(text)
[ "def __call__(self, text: str) -> Doc:\n if self.preprocessor:\n text = self.preprocessor(text)\n juman_lines = self._juman_parse(text)\n dtokens = self._detailed_tokens(juman_lines)\n doc = self._dtokens_to_doc(dtokens)\n self.set_juman_fstring(doc, juman_lines)\n return doc", "def rst_document(text: str) -> docutils.nodes.document:\n\n document = docutils.utils.new_document(\"<input>\", default_settings)\n parser = docutils.parsers.rst.Parser()\n parser.parse(text, document)\n return document", "def add_text_in_doc(self, text):\n self.text = text\n self.title_doc()", "def textacy_text(self):\n text = text_utils.preprocess(self.content)\n return textacy.Doc(SPACY_NLP(text))", "def __init__(self, path=\"\", text=\"\"):\n\n if path is not \"\" and text is not \"\":\n print('Error! Only one between file path and text must be provided')\n exit(1)\n\n eng = spacy.load('en', disable=['ner'])\n tokenizer = English().Defaults.create_tokenizer(eng)\n\n if path is not \"\":\n try:\n with open(path, 'r') as f:\n self._text_read = f.read()\n self._text = eng(self._text_read)\n\n except FileNotFoundError:\n print('Can\\'t access the file specified by', path,\n ', please provide a valide path')\n\n else:\n self._text_read = text\n self._text = eng(text)\n\n self._doc = tokenizer(self._text_read) # spacy-doc object\n # with preprocessed file", "def get_nlp(input_text):\n doc = PARSER(input_text)\n return doc", "def make_document(source, target, text, session=None):\n d = iwqet.Document(source, target, text, proc=True, session=session)\n return d", "def text_to_instance(self, doc_text: Dict[str, Any]):\n doc = Document.from_json(doc_text)\n\n # Make sure there are no single-token sentences; these break things.\n sent_lengths = [len(x) for x in doc.sentences]\n if min(sent_lengths) < 2:\n msg = (f\"Document {doc.doc_key} has a sentence with a single token or no tokens. 
\"\n \"This may break the modeling code.\")\n warnings.warn(msg)\n\n fields = self._process_sentence_fields(doc)\n fields[\"metadata\"] = MetadataField(doc)\n\n return Instance(fields)", "def preprocess_text(doc):\n doc = doc.lower()\n if remove_headers:\n doc = doc[doc.find(\"<html>\"):]\n if re.search(\"<html>\", doc) is not None and process_html:\n doc = proc_html(doc)\n return doc", "def process_doc(self, env: BuildEnvironment, docname: str,\n document: nodes.document) -> None:\n pass", "def __init__(self, doc_text):\n\n sents = self._parse_doc_text(doc_text)\n mentions = self._get_mentions(sents)\n pairs, labels = self._generate_mention_pairs_and_labels(mentions)\n self.mention_pairs = pairs\n self.labels = labels", "def text_creator(pdfContent):\n splitPdfContent = pdfContent.split('\\n')\n with open('pdfContent.text', 'w') as text:\n for mot in splitPdfContent:\n text.write(mot + '\\n')\n text.close()\n return text", "def __init__(self, text):\n\n self.text = text\n\n self.tokenize()", "def helper_doc_to_text(filename):\n\n text_file = filename.split('.')[0] + \".txt\"\n with open(text_file, \"w\") as fo:\n try:\n command = \"catdoc\"\n option = \"-w\" # Disables word wrapping\n\n raw_text = subprocess.check_output([command, option, filename])\n\n fo.write(raw_text)\n except OSError:\n print \"Command %s could not be executed.\" % command", "def parse(self, rtf_text, filename, file=None):\n parsed_text = self._remove_tags(self._clean_url_field(self._create_newlines(rtf_text)))\n date = self._find_date(parsed_text)\n time = self._find_time(parsed_text)\n if date is None:\n # print('no date')\n return\n else:\n try:\n filename = filename + date + '_' + time\n except TypeError:\n print('halt')\n write_file(parsed_text, filename, self.output_directory)\n self.files_output[file] += 1", "def process_text(config: Dict[str, Any]) -> Tuple[str,\n Sequence[Tuple[str, str]],\n Sequence[Tuple[str, Sequence[Sequence[str]]]]]:\n source_type = config[\"source_type\"]\n source_path = config[\"source_path\"]\n\n _logger.debug(\"Using source '{}' ({})\".format(source_path, source_type.upper()))\n\n raw_html = get_raw_source(source_type, source_path)\n\n title_raw = _title_from_html(raw_html)\n headers_raw = _headers_from_html(raw_html)\n _logger.debug(\"HEADERS: {}\".format(headers_raw))\n\n texts_by_chapters = _texts_by_chapters(raw_html, headers_raw)\n sentences_by_chapters = text_by_chapters_to_sentences_by_chapters(texts_by_chapters)\n _logger.debug(\"SENTENCES BY CHAPTERS: {}\".format(sentences_by_chapters))\n\n # Preprocessing\n processed_wordlists_by_chapters = _preprocess_sentences(sentences_by_chapters)\n _logger.debug(\"PROCESSED LISTS OF WORDS BY CHAPTERS: {}\".format(processed_wordlists_by_chapters))\n\n return title_raw, texts_by_chapters, processed_wordlists_by_chapters", "def process_text(trove_key, article_id):\n data = troveAPI.trove_api_get(trove_key, article_id)\n text = data['article']['articleText']\n processed_text = pre_process(text)\n return processed_text", "def _parse(txt):\n from mwlib.refine.compat import parse_txt\n from mwlib import parser\n \n res = parse_txt(txt)\n \n\n # res is an parser.Article. 
\n if len(res.children)!=1:\n res.__class__ = parser.Node\n return res\n\n res = res.children[0]\n \n if res.__class__==parser.Paragraph:\n res.__class__ = parser.Node\n\n return res\n\n # if len(res.children)!=1:\n # return res\n # return res.children[0]", "def parse(text, lang='xml', style='default'):\n parser = core.Parser(lang, style)\n parser.parse(text)\n return (parser.document, parser.log)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the status of a registered wallet
async def get_wallet_status(self, wallet_id: str) -> dict:
    result = await self._fetch(messages.WalletStatusReq(wallet_id), messages.WalletStatus)
    return result.status
[ "def jsonrpc_wallet_status(self, wallet_id=None):\n if self.wallet_manager is None:\n return {'is_encrypted': None, 'is_syncing': None, 'is_locked': None}\n wallet = self.wallet_manager.get_wallet_or_default(wallet_id)\n return {\n 'is_encrypted': wallet.is_encrypted,\n 'is_syncing': len(self.ledger._update_tasks) > 0,\n 'is_locked': wallet.is_locked\n }", "def wallet(self):\n\t\tdat = self.conn.call('GET', '/api/wallet/').json()\n\t\tbalance = float(dat['data']['total']['balance'])\n\t\tsendable = float(dat['data']['total']['sendable'])\n\t\treturn (balance, sendable)", "def wallet_detail(self):\n\t\tdat = self.conn.call('GET', '/api/wallet/').json()\n\t\treturn dat['data']", "def wallet(self, full=False):\n return self._wallet.info()", "async def wallet_show(self, ctx):\n\t\tuser = ctx.message.author\n\t\tscopes = [\"wallet\"]\n\t\tendpoint = \"account/wallet\"\n\t\tkeydoc = await self.fetch_key(user)\n\t\ttry:\n\t\t\tawait self._check_scopes_(user, scopes)\n\t\t\tkey = keydoc[\"key\"]\n\t\t\theaders = self.construct_headers(key)\n\t\t\tresults = await self.call_api(endpoint, headers)\n\t\texcept APIKeyError as e:\n\t\t\tawait self.bot.say(e)\n\t\t\treturn\n\t\texcept APIError as e:\n\t\t\tawait self.bot.say(\"{0.mention}, API has responded with the following error: \"\n\t\t\t\t\t\t\t \"`{1}`\".format(user, e))\n\t\t\treturn\n\t\twallet = [{\"count\": 0, \"id\": 1, \"name\": \"Gold\"},\n\t\t\t\t {\"count\": 0, \"id\": 4, \"name\": \"Gems\"},\n\t\t\t\t {\"count\": 0, \"id\": 2, \"name\": \"Karma\"},\n\t\t\t\t {\"count\": 0, \"id\": 3, \"name\": \"Laurels\"},\n\t\t\t\t {\"count\": 0, \"id\": 18, \"name\": \"Transmutation Charges\"},\n\t\t\t\t {\"count\": 0, \"id\": 23, \"name\": \"Spirit Shards\"},\n\t\t\t\t {\"count\": 0, \"id\": 32, \"name\": \"Unbound Magic\"},\n\t\t\t\t {\"count\": 0, \"id\": 15, \"name\": \"Badges of Honor\"},\n\t\t\t\t {\"count\": 0, \"id\": 16, \"name\": \"Guild Commendations\"}]\n\t\tfor x in wallet:\n\t\t\tfor curr in results:\n\t\t\t\tif curr[\"id\"] == x[\"id\"]:\n\t\t\t\t\tx[\"count\"] = curr[\"value\"]\n\t\taccountname = keydoc[\"account_name\"]\n\t\tcolor = self.getColor(user)\n\t\tdata = discord.Embed(description=\"Wallet\", colour=color)\n\t\tfor x in wallet:\n\t\t\tif x[\"name\"] == \"Gold\":\n\t\t\t\tx[\"count\"] = self.gold_to_coins(x[\"count\"])\n\t\t\t\tdata.add_field(name=x[\"name\"], value=x[\"count\"], inline=False)\n\t\t\telif x[\"name\"] == \"Gems\":\n\t\t\t\tdata.add_field(name=x[\"name\"], value=x[\"count\"], inline=False)\n\t\t\telse:\n\t\t\t\tdata.add_field(name=x[\"name\"], value=x[\"count\"])\n\t\tdata.set_author(name=accountname)\n\t\ttry:\n\t\t\tawait self.bot.say(embed=data)\n\t\texcept discord.HTTPException:\n\t\t\tawait self.bot.say(\"Need permission to embed links\")", "def get_status(self):\r\n\r\n try:\r\n req = self.config.session.get(\r\n self.status_url, verify=self.config.verify, timeout=self.config.timeout)\r\n res = json.loads(req.text)['state']\r\n return res\r\n except requests.exceptions.RequestException as e:\r\n raise VraSdkRequestException(\r\n f'Error requesting status url {self.status_url}: {e}')\r\n except Exception as e:\r\n raise VraSdkMainRequestException(\r\n f'Unmanaged error requesting status url {self.status_url}: {e}')", "async def get_account_status(self, **params):\r\n return await self.client_helper(\"get_account_status\", **params)", "def testnet():\n return wallet['obj'].testnet", "def _get_status(self):\n return self._read_byte(_BME280_REGISTER_STATUS)", "def get_status():\n \n return 
db.get_db().getRoot().getS(ns.l2tpDeviceStatus, rdf.Type(ns.L2tpDeviceStatus))", "def test_retrieve_wallet(db, client):\n _path = apiutils.create_wallet_path()\n response = apiutils.post(\n db, client, _path,\n {'name': 'wallet with balance', 'balance': '100.00'}\n )\n assert response.status_code == 201\n w_path = apiutils.get_wallet_path(wallet_pk=1)\n response = apiutils.get(db, client, w_path)\n assert response.status_code == 200\n assert data_test_wallet.validate_wallet(response.json())\n assert response.json()['balance'] == '0.00'", "def get_status(self) -> NodeManagerStatus:", "def current_address():\n return wallet['obj'].current_address", "async def get_wallet(tsan_data: AnchorData):\n\n w_mgr = WalletManager()\n rv = None\n\n wallet_config = {\n 'id': tsan_data.name\n }\n if tsan_data.wallet_type:\n wallet_config['storage_type'] = tsan_data.wallet_type\n if tsan_data.wallet_create:\n if tsan_data.seed:\n wallet_config['seed'] = tsan_data.seed\n try:\n rv = await w_mgr.create(wallet_config, access=tsan_data.wallet_access)\n logging.info('Created wallet %s', tsan_data.name)\n except ExtantWallet:\n rv = w_mgr.get(wallet_config, access=tsan_data.wallet_access)\n logging.warning(\n 'Wallet %s already exists: remove seed and wallet.create from config file',\n tsan_data.name)\n else:\n rv = w_mgr.get(wallet_config, access=tsan_data.wallet_access)\n\n return rv", "def get_network_status(self, network):\n\n with self._lock:\n with sqlite3.connect(self._database_name) as connection:\n status_query = connection.execute(f\"SELECT * from networks WHERE name='{network}'\")\n\n return status_query.fetchone()[4]", "def get_status(self):\n self.doGet(STATUS_API, DEFAULT_HEADERS)\n self.parse_response_as_json()", "def list_wallets(self):\n return [\n {\"path\": k, \"synchronized\": w.is_up_to_date()}\n for k, w in self.daemon.wallets.items()\n ]", "def get_account_information(self, coin):\n\n accounts = self.auth_client.get_accounts()\n for account in accounts:\n if coin in account['currency']:\n return float(account['available'])\n\n return None", "def get_status(self):\n return StatusAPI.from_client(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a verifier service
async def register_verifier(self, wallet_id: str, config: dict) -> str:
    result = await self._fetch(
        messages.RegisterAgentReq(AgentType.verifier.value, wallet_id, config),
        messages.AgentStatus)
    return result.agent_id
[ "def register(self, service, factory=..., instance=..., scope=..., **kwargs):\n ...", "def register_service_and_impl(self, service, scope, impl, resolve_args):\n ...", "def register_concrete_service(self, service, scope):\n ...", "def save_verifier(self, token, verifier, request):\r\n log.debug('Save verifier %r for %r', verifier, token)\r\n self._verifiersetter(\r\n token=token, verifier=verifier, request=request\r\n )", "def test_verifier_verifier(self):\n verifier = aws_encryption_sdk.internal.crypto.Verifier(\n algorithm=self.mock_algorithm,\n public_key=self.mock_verifier_public_key,\n signature=sentinel.signature\n )\n self.mock_algorithm.kdf_hash_type.assert_called_once_with()\n self.mock_cryptography_ec.ECDSA.assert_called_once_with(sentinel.hash_instance)\n self.mock_verifier_public_key.verifier.assert_called_once_with(\n signature=sentinel.signature,\n signature_algorithm=sentinel.ecdsa_instance\n )\n assert verifier.verifier == self.mock_verifier_instance", "def register_service_agent(cm, sc, conf, rpcmgr):\n\n service_type = lb_constants.SERVICE_TYPE\n cm.register_service_agent(service_type, rpcmgr)", "def register_service(service, registry_uid):\n from Acquire.Service import Service as _Service\n if not isinstance(service, _Service):\n raise TypeError(\"You can only register Service objects\")\n\n if not service.uid().startswith(\"STAGE1\"):\n raise PermissionError(\n \"You can only register services that are at STAGE1 of \"\n \"construction\")\n\n if service.service_type() == \"registry\":\n from Acquire.Registry import get_registry_details \\\n as _get_registry_details\n\n details = _get_registry_details(registry_uid=registry_uid)\n\n from Acquire.Service import Service as _Service\n canonical_url = _Service.get_canonical_url(details[\"canonical_url\"])\n\n # make sure that everything matches what was specified\n if canonical_url != service.canonical_url():\n raise PermissionError(\n \"Cannot change the canonical URL. I expect %s, but \"\n \"you are trying to set to %s\" %\n (service.canonical_url(), details[\"canonical_url\"]))\n\n from Acquire.Registry import update_registry_keys_and_certs \\\n as _update_registry_keys_and_certs\n\n _update_registry_keys_and_certs(\n registry_uid=registry_uid,\n public_key=service.public_key(),\n public_certificate=service.public_certificate())\n\n service.create_stage2(service_uid=registry_uid,\n response=service._uid)\n return service\n\n # get the trusted registry\n from Acquire.Registry import get_trusted_registry_service \\\n as _get_trusted_registry_service\n registry_service = _get_trusted_registry_service(\n service_uid=registry_uid)\n\n if not registry_service.is_registry_service():\n raise PermissionError(\n \"You can only register new services on an existing and valid \"\n \"registry service. 
Not %s\" % registry_service)\n\n from Acquire.ObjectStore import bytes_to_string as _bytes_to_string\n pubkey = registry_service.public_key()\n challenge = pubkey.encrypt(service.uid())\n\n args = {\"service\": service.to_data(),\n \"challenge\": _bytes_to_string(challenge),\n \"fingerprint\": pubkey.fingerprint()}\n\n result = registry_service.call_function(function=\"register_service\",\n args=args)\n\n service_uid = result[\"service_uid\"]\n response = result[\"response\"]\n\n service.create_stage2(service_uid=service_uid, response=response)\n\n return service", "def verifiersetter(self, f):\r\n self._verifiersetter = f\r\n return f", "def register(kind, listener):\n assert isinstance(listener, Listener)\n kind = _guard_kind(kind)\n _registered[kind].append(listener)", "def verify(self, verifier: verify_mod.SnapshotVerifier) -> None:\n with self.edenfs() as eden:\n eden.start()\n print(\"Verifing snapshot data:\")\n print(\"=\" * 60)\n self.verify_snapshot_data(verifier, eden)\n print(\"=\" * 60)", "def register_service(self, id, service):\n if id in self.services:\n raise PluginError(\"Existing service: {0}\".format(id))\n\n logger.debug(\"Registering service: {}\".format(id))\n self.services[id] = service", "def register_hosting_service(\n name: str,\n cls: Type[HostingService],\n) -> None:\n cls.hosting_service_id = name\n hosting_service_registry.register(cls)", "def resolve_srv(self, domain, service, protocol, callback):\n raise NotImplementedError", "def test_user_specified_service_v1(self):\n # Ensure that the service name was configured\n from ddtrace import config\n\n assert config.service == \"mysvc\"\n\n client = self.make_client([b\"STORED\\r\\n\", b\"VALUE key 0 5\\r\\nvalue\\r\\nEND\\r\\n\"])\n client.set(b\"key\", b\"value\", noreply=False)\n\n pin = Pin.get_from(pymemcache)\n tracer = pin.tracer\n spans = tracer.pop()\n\n assert spans[0].service == \"mysvc\"", "def test_replace_apiregistration_v1beta1_api_service(self):\n pass", "def verifiergetter(self, f):\r\n self._verifiergetter = f\r\n return f", "def test_dependencyMissing(self):\n with SetAsideModule(\"service_identity\"):\n sys.modules[\"service_identity\"] = None\n\n result = sslverify._selectVerifyImplementation()\n expected = (\n sslverify.simpleVerifyHostname,\n sslverify.simpleVerifyIPAddress,\n sslverify.SimpleVerificationError)\n self.assertEqual(expected, result)", "def advertise_service(self, service, service_type, callback):\n\n rospy.Service(service, service_type, callback)", "def test_create_apiregistration_v1beta1_api_service(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch the status of a registered agent (issuer, holder, or verifier)
async def get_agent_status(self, agent_id: str) -> dict:
    result = await self._fetch(messages.AgentStatusReq(agent_id), messages.AgentStatus)
    return result.status
[ "def agent_status(self,idx):\n agent = self.crowd.getAgent(idx)\n # translate a few coordinates\n class status:\n pass\n result = status()\n result.npos = detour2panda(agent.npos)\n tmp = detour2panda(agent.vel)\n result.vel = Vec3(tmp.getX(),tmp.getY(),tmp.getZ())\n return result", "def get_status(self):\r\n\r\n try:\r\n req = self.config.session.get(\r\n self.status_url, verify=self.config.verify, timeout=self.config.timeout)\r\n res = json.loads(req.text)['state']\r\n return res\r\n except requests.exceptions.RequestException as e:\r\n raise VraSdkRequestException(\r\n f'Error requesting status url {self.status_url}: {e}')\r\n except Exception as e:\r\n raise VraSdkMainRequestException(\r\n f'Unmanaged error requesting status url {self.status_url}: {e}')", "def get_agent(self):\n return # osid.authentication.Agent", "def get_status(self) -> NodeManagerStatus:", "def fetch_status():\n return json.loads(requests.get('http://omegle.com/status').text)", "def check_status(self):\n self.logger.debug('Server - td-agent-bit - check_status call.')\n self.change_service_status(\"status\")\n return self.status", "def get_status(self, ij=None, arc=None):\n if not ij is None: return api.get_status(ij[0], ij[1])\n if not arc is None: return api.get_arc_status(arc)\n return api.get_arc_stats()", "def get_status(self):\n result = self.endpoint.get(endpoint=self.name + \"/status\")\n if result.status_code != requests.codes.ok:\n raise Exception(\n \"Error retrieving indexer status. \"\n \"result: {result}, content: {content}\".format(\n result=result, content=result.content)\n )\n\n return json.loads(result.content)", "def get_status(self):\n self.doGet(STATUS_API, DEFAULT_HEADERS)\n self.parse_response_as_json()", "def get_status(self):\n return StatusAPI.from_client(self)", "def status(self, result, config=None):\r\n return result['status']", "def get_status(self):\n return self.client.get_asg_ready(self.env, self.name)", "def test_agents_query(self):\n # Initializing key variables\n response = self.API.get('/infoset/api/v1/agents')\n data = json.loads(response.get_data(as_text=True))\n\n # Verify reponse code\n self.assertEqual(response.status_code, 200)\n\n # Verify response content\n self.assertEqual(isinstance(data, list), True)\n self.assertEqual(data[0]['id_agent'], self.good_agent.id_agent())\n self.assertEqual(data[0]['exists'], self.good_agent.exists())\n self.assertEqual(data[0]['enabled'], self.good_agent.enabled())", "def status(self):\n \n return self._make_request(\"server/status\").json()", "def rpc_status(self, sender, *args):\n \n if (len(args) != 0):\n raise rpc.RPCFault(604, 'status: no arguments')\n dic = {}\n dic['online'] = self.factory.online\n dic['startup_time'] = time.ctime(self.factory.startuptime)\n dic['startup_at'] = volent.descinterval(\n self.factory.startuptime,\n limit=2)\n dic['last_new_bot'] = volent.descinterval(\n self.factory.activitytime,\n limit=2)\n dic['bots_running'] = len(self.factory.actors)\n dic['bots_started'] = self.factory.actorsstarted\n \n return dic", "def get_status(role, domain_class, tab):", "def evaluate_server_agent(self, itr):\n if itr > 0:\n self.pbar.stop()\n\n logger.log(\"Evaluating global agent...\")\n self.server.agent.eval_mode(itr)\n eval_time = -time.time()\n traj_infos = self.server.sampler.evaluate_agent(itr)\n eval_time += time.time()\n\n logger.log(\"Evaluation runs complete.\")\n return traj_infos, eval_time", "async def get_ai_state(self):\n body = [{\"cmd\": \"GetAiState\", \"action\": 0, \"param\": {\"channel\": 
self._channel}}]\n\n response = await self.send(body)\n if response is None:\n return False\n\n try:\n json_data = json.loads(response)\n\n if json_data is None:\n _LOGGER.error(\n \"Unable to get AI detection state at IP %s\", self._host\n ) \n return self._ai_state\n\n self.map_json_response(json_data)\n except (TypeError, json.JSONDecodeError):\n self.clear_token()\n\n return self._ai_state", "def status(self) -> 'outputs.JobStatusResponse':\n return pulumi.get(self, \"status\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a credential type for a previously registered issuer
async def register_credential_type(self, issuer_id: str, schema_name: str,
                                   schema_version: str, origin_did: str,
                                   attr_names: Sequence, config: dict = None,
                                   dependencies: list = None) -> None:
    await self._fetch(
        messages.RegisterCredentialTypeReq(
            issuer_id, schema_name, schema_version, origin_did,
            attr_names, config, dependencies or []),
        messages.IndyServiceAck)
[ "def register_domain_type(domain_class, type_key):", "def __setitem__(self, issuer_id, key_issuer):\n self._issuers[issuer_id] = key_issuer", "def register_provider(args):\n if len(args) == 0:\n click.echo(\"Usage: mephisto register <provider_type> arg1=value arg2=value\")\n return\n\n from mephisto.abstractions.databases.local_database import LocalMephistoDB\n from mephisto.operations.registry import get_crowd_provider_from_type\n from mephisto.operations.utils import parse_arg_dict, get_extra_argument_dicts\n\n provider_type, requester_args = args[0], args[1:]\n args_dict = dict(arg.split(\"=\", 1) for arg in requester_args)\n\n crowd_provider = get_crowd_provider_from_type(provider_type)\n RequesterClass = crowd_provider.RequesterClass\n\n if len(requester_args) == 0:\n from tabulate import tabulate\n\n params = get_extra_argument_dicts(RequesterClass)\n for param in params:\n click.echo(param[\"desc\"])\n click.echo(tabulate(param[\"args\"].values(), headers=\"keys\"))\n return\n\n try:\n parsed_options = parse_arg_dict(RequesterClass, args_dict)\n except Exception as e:\n click.echo(str(e))\n\n if parsed_options.name is None:\n click.echo(\"No name was specified for the requester.\")\n\n db = LocalMephistoDB()\n requesters = db.find_requesters(requester_name=parsed_options.name)\n if len(requesters) == 0:\n requester = RequesterClass.new(db, parsed_options.name)\n else:\n requester = requesters[0]\n try:\n requester.register(parsed_options)\n click.echo(\"Registered successfully.\")\n except Exception as e:\n click.echo(str(e))", "def ensure_credential_type(self, connection, ct_spec):\n ct = connection.credential_types.find_by_name(ct_spec['name'])\n if ct:\n self.stdout.write(f\"Updating credential type '{ct_spec['name']}'\")\n ct = ct._update(**ct_spec)\n else:\n self.stdout.write(f\"Creating credential type '{ct_spec['name']}'\")\n ct = connection.credential_types.create(**ct_spec)\n return ct", "def register_idpTypeRequest(self, data):\n full_url = self.rest_prefix + OSClient.URI_PROVIDERTYPES\n data = {\n\t\t\"name\": data['name'],\n \t\"resourceTypes\": [\n \t\t{\n \t\t\t\"name\":\"users\",\n \t\t\t\"path\": \"/principals/users\"\n \t\t},\n \t\t{\n \t\t\t\"name\":\"groups\",\n \t\t\t\"path\": \"/principals/groups\"\n \t\t}\n \t\t]\n\t\t\t}\n return requests.post(full_url, headers=OSClient.HEADERS, json=data)", "async def create_rev_reg(request: web.BaseRequest):\n context: AdminRequestContext = request[\"context\"]\n profile = context.profile\n body = await request.json()\n\n credential_definition_id = body.get(\"credential_definition_id\")\n max_cred_num = body.get(\"max_cred_num\")\n\n # check we published this cred def\n async with profile.session() as session:\n storage = session.inject(BaseStorage)\n\n found = await storage.find_all_records(\n type_filter=CRED_DEF_SENT_RECORD_TYPE,\n tag_query={\"cred_def_id\": credential_definition_id},\n )\n if not found:\n raise web.HTTPNotFound(\n reason=f\"Not issuer of credential definition id {credential_definition_id}\"\n )\n\n try:\n revoc = IndyRevocation(profile)\n issuer_rev_reg_rec = await revoc.init_issuer_registry(\n credential_definition_id,\n max_cred_num=max_cred_num,\n notify=False,\n )\n except RevocationNotSupportedError as e:\n raise web.HTTPBadRequest(reason=e.message) from e\n await shield(issuer_rev_reg_rec.generate_registry(profile))\n\n return web.json_response({\"result\": issuer_rev_reg_rec.serialize()})", "def issuer_fingerprint(self, value):\n\n self._issuer_fingerprint.set(value)", "def 
register_issuer_objects(sender, **kwargs):\n from replicat_documents.models import DocumentIssuerChoice\n\n for key, value in get_document_issuers().items():\n obj, created = DocumentIssuerChoice.objects.update_or_create(\n issuer_module_name=key, app_name=value[\"app_name\"], label=value[\"label\"]\n )\n\n if not created:\n obj.enable()\n\n # Set `enabled=False` for DocumentIssuerChoice instances which no longer have an associated issuer module\n DocumentIssuerChoice.objects.exclude(issuer_module_name__in=get_document_issuers().keys()).update(enabled=False)", "def _cred_types(self):\n # at present the credential type specifications are built from the\n # legacy credential types, but this may change at any point in the\n # future\n # here is what that was in Mar 2022\n # 'user_password': {'fields': ['user', 'password'],\n # 'secret': 'password'},\n # 'token': {'fields': ['token'], 'secret': 'token'},\n # 'git': {'fields': ['user', 'password'], 'secret': 'password'}\n # 'aws-s3': {'fields': ['key_id', 'secret_id', 'session', 'expiration'],\n # 'secret': 'secret_id'},\n # 'nda-s3': {'fields': None, 'secret': None},\n # 'loris-token': {'fields': None, 'secret': None},\n\n if self.__cred_types:\n return self.__cred_types\n\n from datalad.downloaders import CREDENTIAL_TYPES\n mapping = {}\n for cname, ctype in CREDENTIAL_TYPES.items():\n secret_fields = [\n f for f in (ctype._FIELDS or {})\n if ctype._FIELDS[f].get('hidden')\n ]\n mapping[cname] = dict(\n fields=list(ctype._FIELDS.keys()) if ctype._FIELDS else None,\n secret=secret_fields[0] if secret_fields else None,\n )\n # an implementation-independent s3-style credential (with the aim to\n # also work for MinIO and Ceph)\n mapping['s3'] = dict(\n # use boto-style names, but strip \"aws\" prefix, and redundant\n # non-distinguishing 'key' and 'access' terms\n fields=['key', 'secret'],\n secret='secret',\n )\n self.__cred_types = mapping\n return mapping", "def registerAuthenticator(self, auth):\n self.authenticators[auth.getName().upper()] = auth", "def register_calendar_type(\n name: str,\n calendar_type: Type[TradingCalendar],\n force: bool = False\n ) -> None:\n ...", "def register_integration(\n self, key: str, type_: Union[Type[\"Integration\"], LazyLoader]\n ) -> None:\n self._integrations[key] = type_", "def register_alternative_icon_type(self, alternative_icon_type):\n raise NotImplementedError", "def register(cid):\n cred_id=rand.random()*MAX_CRED_ID\n passcode=''.join(rand.choice(string.ascii_uppercase+\\\n string.ascii_lowercase+string.digits)\\\n for _ in range(9))\n stat=sql.SQL(\"INSERT INTO credentials (id, passcode, cred_id) VALUES ({cid}, {passcode}, {credid});\").\\\n format(cid=sql.Literal(cid), \\\n passcode=sql.Literal(passcode), \\\n credid=sql.Literal(cred_id))\n db_log.debug(stat)\n cur.execute(stat)\n return (cred_id, passcode)", "def register(a_type, converter):\n Converters.types[a_type] = converter", "def register_type(self, key, *types):\n assert key in self._config\n self._types[key] = set(types)", "def return_issuer(self, issuer_id):\n _issuer = self._get_issuer(issuer_id)\n if _issuer is None:\n return self._add_issuer(issuer_id)\n return _issuer", "def get_credential(self, credential_type):\n return # object", "def RegisterInputType(input_type, loader):\n valid_input_types[input_type] = loader\n if input_type not in connected_types:\n connected_types[input_type] = set()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a connection to TheOrgBook as a holder/prover
async def register_orgbook_connection(self, agent_id: str, config: dict = None) -> str:
    result = await self._fetch(
        messages.RegisterConnectionReq(ConnectionType.TheOrgBook.value, agent_id, config or {}),
        messages.ConnectionStatus)
    return result.connection_id
[ "def make_connection(self, connection):\n connection.make_connection()\n connection.battery.add_house(connection.house)\n connection.house.set_connection()", "def register_computer(self, host, port, comp_id = None):\n if comp_id is None:\n comp_id = str(uuid.uuid4())\n\n context = zmq.Context()\n sock = context.socket(zmq.DEALER)\n sock.connect(\"tcp://%s:%d\"%(host,port))\n\n self._dealers[comp_id] = sock\n\n self.poll.register(sock, zmq.POLLIN)\n\n return comp_id", "async def register_holder_connection(self, agent_id: str, config: dict = None) -> str:\n result = await self._fetch(\n messages.RegisterConnectionReq(ConnectionType.holder.value, agent_id, config or {}),\n messages.ConnectionStatus)\n return result.connection_id", "def registerPool(cls,name,dbPool):\r\n DatabaseRegistry.__registry[name] = dbPool", "def register_client(game_connection):\n log.debug(f\"servers.py:register_client - Adding game {game_connection.uuid} to connections\")\n connections[game_connection.uuid] = game_connection", "def handle_added_connection(event):\n manager = event.newParent\n manager.registerUtility(event.object, IZEOConnection, name=event.object.name)", "def registerCompetitor(tournament_id, competitor_id):\n dbconnection = connect()\n dbcursor = dbconnection.cursor()\n\n dbcursor.execute(\"\"\"INSERT INTO competitors (tournament_id, competitor_id,\n competitor_bye)\n VALUES (%s, %s, %s);\"\"\",\n (tournament_id, competitor_id, False,))\n\n dbconnection.commit()\n dbconnection.close()", "def do_assign_room(self, arg):\n self.dojo.assign_room(arg)", "def __init__ (self, db_con_str = 'dbname=tournament'):\n self.conn = psycopg2.connect(db_con_str)", "def connection(ctx: Context, connection_public_id: PublicId):\n upgrade_item(ctx, \"connection\", connection_public_id)", "def __init__(self, connection):\n self.conn = connection\n self.c = self.conn.cursor()\n self.setup()", "def add_connection(self, cxn):\n self.change_connection(-1, cxn)", "def connection_setup(self):\n\n self.logger.debug(\"Create the connection to the mgr....\")\n # Create a connection to Hal driver mgr\n self.mgrConnection = HalTransport(HalTransport.HalTransportClientMgr,\n HalTransport.HalClientMode,\n disconnectHandlerCb=self.connectionDisconnectCb)\n\n # create the poller\n if self.poller is None:\n self.poller = self.dispatcher.get_poll()\n\n # register the mgr socket\n self.dispatcher.fd_register(self.mgrConnection.socket, self.dispatcher.EV_FD_IN, self.host_management_cb)\n self.dispatcher.fd_register(self.mgrConnection.monitor, self.dispatcher.EV_FD_IN, self.host_management_cb)", "def register(self):\n\n @asyncio.coroutine\n def on_ready(regh, status):\n \"\"\" On_ready for Discovered Topology registration \"\"\"\n self._log.debug(\"PUB reg ready for Discovered Topology handler regn_hdl(%s) status %s\",\n regh, status)\n\n @asyncio.coroutine\n def on_prepare(xact_info, action, ks_path, msg):\n \"\"\" prepare for Discovered Topology registration\"\"\"\n self._log.debug(\n \"Got topology on_prepare callback (xact_info: %s, action: %s): %s\",\n xact_info, action, msg\n )\n\n if action == rwdts.QueryAction.READ:\n \n for name, sdnacct in self._acctstore.items():\n if sdnacct.account_type != \"odl\":\n continue\n sdnintf = sdnacct.sdn\n\n rc, nwtop = sdnintf.get_network_list(sdnacct.sdnal_account_msg)\n #assert rc == RwStatus.SUCCESS\n if rc != RwStatus.SUCCESS:\n self._log.error(\"Fetching get network list for SDN Account %s failed\", name)\n xact_info.respond_xpath(rwdts.XactRspCode.NACK)\n return\n \n 
self._log.debug(\"Topology: Retrieved network attributes \")\n for nw in nwtop.network:\n # Add SDN account name\n nw.rw_network_attributes.sdn_account_name = name\n nw.server_provided = False\n nw.network_id = name + ':' + nw.network_id\n self._log.debug(\"...Network id %s\", nw.network_id)\n nw_xpath = (\"D,/nd:network[network-id={}]\").format(quoted_key(nw.network_id))\n xact_info.respond_xpath(rwdts.XactRspCode.MORE,\n nw_xpath, nw)\n\n xact_info.respond_xpath(rwdts.XactRspCode.ACK)\n #err = \"%s action on discovered Topology not supported\" % action\n #raise NotImplementedError(err)\n\n self._log.debug(\"Registering for discovered topology using xpath %s\", NwtopDiscoveryDtsHandler.DISC_XPATH)\n\n handler = rift.tasklets.DTS.RegistrationHandler(\n on_ready=on_ready,\n on_prepare=on_prepare,\n )\n\n self._regh = yield from self._dts.register(\n NwtopDiscoveryDtsHandler.DISC_XPATH,\n flags=rwdts.Flag.PUBLISHER,\n handler=handler\n )", "def __init__(self, wallet: Wallet, pool: NodePool, cfg: dict = None) -> None:\n\n LOGGER.debug('HolderProver.__init__ >>> wallet: %s, pool: %s, cfg: %s', wallet, pool, cfg)\n\n super().__init__(wallet, pool)\n self._link_secret = None\n\n self._dir_tails = join(expanduser('~'), '.indy_client', 'tails')\n makedirs(self._dir_tails, exist_ok=True)\n\n self._cfg = cfg or {}\n validate_config('holder-prover', self._cfg)\n\n self._dir_cache = join(expanduser('~'), '.indy_client', 'wallet', self.wallet.name, 'cache')\n makedirs(self._dir_cache, exist_ok=True)\n\n LOGGER.debug('HolderProver.__init__ <<<')", "def __init__(self, pool: NodePool, wallet: Wallet, cfg: dict = None) -> None:\n\n logger = logging.getLogger(__name__)\n logger.debug('HolderProver.__init__: >>> pool: {}, wallet: {}, cfg: {}'.format(pool, wallet, cfg))\n\n super().__init__(pool, wallet, cfg)\n self._master_secret = None\n\n logger.debug('HolderProver.__init__: <<<')", "def create_new_connection(service):\n user = users.get_current_user()\n new_connection = ConnectionRecord()\n new_connection.user = user \n new_connection.service = service\n new_connection.put()", "def _registra_envio(self, path, address):\n ip , port = address\n man = Manager()\n man.insere_fila(path, self.username, ip , port)\n\n\n return", "def test_get_or_create_connector(self):\n remote_id = \"https://example.com/object/1\"\n connector = connector_manager.get_or_create_connector(remote_id)\n self.assertIsInstance(connector, BookWyrmConnector)\n self.assertEqual(connector.identifier, \"example.com\")\n self.assertEqual(connector.base_url, \"https://example.com\")\n\n same_connector = connector_manager.get_or_create_connector(remote_id)\n self.assertEqual(connector.identifier, same_connector.identifier)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a connection to a local holder agent
async def register_holder_connection(self, agent_id: str, config: dict = None) -> str:
    result = await self._fetch(
        messages.RegisterConnectionReq(ConnectionType.holder.value, agent_id, config or {}),
        messages.ConnectionStatus)
    return result.connection_id
[ "def __init__(self, conn: \"AcmedaConnection\", addr: str):\n log.info(f\"Registering hub {addr}\")\n self.conn = conn\n self.addr = addr\n self.motors: Dict[str, \"Motor\"] = {}\n\n log.info(f\"Requesting motor info for hub {addr}\")\n asyncio.create_task(self.request_motor_info())", "def test_register_access(self):\n\n with HttpListener(settings.HTTP_AGENT_PORT) as listener:\n response = requests.post(\n \"https://localhost:%s/agent/register/\" % settings.HTTPS_FRONTEND_PORT, verify=False\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(listener.requests), 1)\n self.assertEqual(listener.last_request.path, \"/agent/register/\")", "def register_client(game_connection):\n log.debug(f\"servers.py:register_client - Adding game {game_connection.uuid} to connections\")\n connections[game_connection.uuid] = game_connection", "def register(client):\n # create a health check that consul will use to monitor us\n check_http = consul.Check.http('http://{}:{}'.format(gethostname(), PORT),\n interval='2s')\n\n # register on consul with the health check\n while True:\n try:\n service_id = '{}:{}'.format(gethostname(), PORT)\n client.agent.service.register('ep2016',\n service_id=service_id,\n address=gethostname(),\n port=PORT,\n check=check_http)\n break\n except (ConnectionError, consul.ConsulException) as err:\n print(err)\n print('consul host is down, reconnecting...')\n sleep(0.5)", "def RegisterTunnels(self, neighborRegistry, instance):\n # Not implemented for JunOS\n pass", "def _register_pairing_agent(self):\n if self.pairing_agent is None:\n print(\"registering auto accept pairing agent\")\n path = \"/RPi/Agent\"\n self.pairing_agent = dbus_custom_services.AutoAcceptAgent(self.bus, path)\n # Register application's agent for headless operation\n bt_agent_manager = dbus.Interface(self.bus.get_object(\"org.bluez\", \"/org/bluez\"), \"org.bluez.AgentManager1\")\n bt_agent_manager.RegisterAgent(path, \"NoInputNoOutput\")\n bt_agent_manager.RequestDefaultAgent(path)", "def do_set_monitor_connect(self, args):\n lb = self.findlb(args.loadbalancer)\n monitor = lb.healthmonitor()\n hm = cloudlb.healthmonitor.HealthMonitor(\n type=\"CONNECT\",\n delay=int(args.delay),\n timeout=int(args.timeout),\n attemptsBeforeDeactivation=int(args.attempts)\n )\n monitor.add(hm)", "def registercenter(self):\n\n\t\tlogger = getlogger(\"Register\")\n\t\tlogger.info(\"Starting register center.\")\n\n\t\t# Set up new poll context and initialize it\n\t\tconf = CONF['register']\n\t\tcontext = PollContext((conf['r_addr'], conf.as_int('r_port')))\n\t\tcontext.initialize()\n\n\n\t\t# Main loop\n\t\tlogger.info(\"Register is running\")\n\t\twhile self.running:\n\n\t\t\t# Wait for arrival event\n\t\t\ttry:\n\t\t\t\tresults = context.wait(1000)\n\t\t\texcept Exception, e:\n\t\t\t\tlogger.error(str(e))\n\n\t\t\tfor event, data, sock in results:\n\t\t\t\tif event == \"TIMEOUT\":\n\t\t\t\t\tbreak\n\n\t\t\t\t# New agent coming\n\t\t\t\telif event == \"REGISTER\":\n\n\t\t\t\t\t# Fix name of agent\n\t\t\t\t\tip = sock.getpeername()[0]\n\t\t\t\t\tdata = eval(data)\n\t\t\t\t\tname = data.get('name', '') == '' and ip or data.get('name')\n\n\n\t\t\t\t\t# Get an available worker \n\t\t\t\t\ttry:\n\t\t\t\t\t\ttarget_worker = self.worker_dispatch()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tlogger.exception(str(e))\n\t\t\t\t\t\tsock.send('None')\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t\t# Insert platform information of agent into database\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.db.insert_host(name, 
data)\n\t\t\t\t\texcept Exception ,e:\n\t\t\t\t\t\tlogger.error(str(e))\n\n\t\t\t\t\t# Return worker address \n\t\t\t\t\tworker_addr = target_worker['addr']\n\t\t\t\t\ttarget_worker['agents'] += 1\n\n\t\t\t\t\tsock.send(str(worker_addr))\n\n\t\t\t\t\tlogger.info(\"New agent has registered and send data to worker %s:%s\" % worker_addr)\n\t\t\t\t\tlogger.debug(\"agent %s has registered.\"%name)\n\n\n\t\tlogger.info(threading.currentThread().getName() + \" is closing\")", "def registerAgent(self):\n manager = dbus.Interface(self.bus.get_object(\n SERVICE_NAME, \"/org/bluez\"), \"org.bluez.AgentManager1\")\n manager.RegisterAgent(BluePlayer.AGENT_PATH, BluePlayer.CAPABILITY)\n manager.RequestDefaultAgent(BluePlayer.AGENT_PATH)\n logging.debug(\"Blueplayer is registered as a default agent\")", "def make_connection(self, connection):\n connection.make_connection()\n connection.battery.add_house(connection.house)\n connection.house.set_connection()", "def connect(self, pool_size=20, web_port=21012):\n self.hub.connect(pool_size, web_port=web_port)\n self.client.start()\n self.client.register()", "def _register_agent(self):\n return True", "def _register_mgr_to_agent(self, agent_id):\n if agent_id not in ProcessAgent.AgentName:\n self.logger.error(\"Cannot find the agent ID [%d], fail to register to the agent.\" % agent_id)\n return False\n\n # agent connections\n try:\n api = Transport(\n ProcessAgent.SockPathMapping[agent_id][\"api\"], Transport.REQSOCK,\n Transport.TRANSPORT_CLIENT)\n except zmq.ZMQError as e:\n self.logger.error(\n \"Cannot connect to %s, reason: \"\n \"%s\" % (ProcessAgent.SockPathMapping[agent_id][\"api\"], str(e)))\n return False\n\n try:\n event_send_sock = Transport(\n ProcessAgent.SockPathMapping[agent_id][\"pull\"],\n Transport.PUSHSOCK, Transport.TRANSPORT_CLIENT)\n except zmq.ZMQError as e:\n api.sock.close()\n self.logger.error(\n \"Cannot connect to %s, \"\n \"reason:%s\" % (ProcessAgent.SockPathMapping[agent_id][\"pull\"], str(e)))\n return False\n\n try:\n event_recv_sock = Transport(\n ProcessAgent.SockPathMapping[agent_id][\"push\"],\n Transport.PULLSOCK, Transport.TRANSPORT_SERVER)\n except zmq.ZMQError as e:\n api.sock.close()\n event_send_sock.sock.close()\n self.logger.error(\n \"Cannot connect to %s, \"\n \"reason:%s\" % (ProcessAgent.SockPathMapping[agent_id][\"push\"], str(e)))\n return False\n\n try:\n # create the API connection and register it to the agent\n register_request = agent_pb2.api_request()\n reg = agent_pb2.msg_manager_register()\n reg.id = self.mgr_id\n reg.action = agent_pb2.msg_manager_register.REG\n reg.path_info = ProcessAgent.SockPathMapping[agent_id][\"push\"]\n register_request.mgr_reg.CopyFrom(reg)\n data = register_request.SerializeToString()\n api.sock.send(data)\n\n self.dispatcher.fd_register(\n event_recv_sock.sock, Dispatcher.EV_FD_IN | Dispatcher.EV_FD_ERR,\n self._handle_agent_event)\n\n self.process_agent_db[agent_id] = {\n \"status\": self.REGISTER_INITIATED_STATUS,\n \"apiSock\": api,\n \"sendSock\": event_send_sock,\n \"recvSock\": event_recv_sock,\n \"ka_stat\": 3, # 3 retries\n }\n # wait and check the register status\n handled = False\n for i in range(self.MGR_REGISTER_TIMEOUT * 10): # multiply 10 for fine granularity\n time.sleep(0.1)\n if self._check_mgr_register_status(agent_id):\n handled = True\n break\n if not handled:\n return False\n\n return True\n except Exception as e:\n self.logger.error(\n \"Got an exception when registering mgr to agent %s, reason: %s\", agent_id, str(e))\n return False", "def 
handle_added_connection(event):\n manager = event.newParent\n manager.registerUtility(event.object, IZEOConnection, name=event.object.name)", "def registerEngine(remoteEngine, id):", "def connectionMade(self):\n self.connectToSelf = self.hijacker.clientBase.connect(MobileCodeClient(), \n self.hijacker.clientBase.getAddress(), \n 100)", "async def register(self, poller, controller, keepalive=10):\n url = \"http://{}:{}/pollers/register\".format(controller[0], controller[1])\n\n headers = {'content-type': 'application/json'}\n payload = {'name': poller[0],\n 'ip': poller[1],\n 'port': poller[2]}\n\n with aiohttp.ClientSession() as session:\n while True:\n logger.debug('Registering/keepalive to controller {}'.format(controller))\n async with session.post(url, data=json.dumps(payload), headers=headers) as response:\n logger.debug('Controller response {}'.format(response.json()))\n\n await asyncio.sleep(keepalive)", "def connection_setup(self):\n\n self.logger.debug(\"Create the connection to the mgr....\")\n # Create a connection to Hal driver mgr\n self.mgrConnection = HalTransport(HalTransport.HalTransportClientMgr,\n HalTransport.HalClientMode,\n disconnectHandlerCb=self.connectionDisconnectCb)\n\n # create the poller\n if self.poller is None:\n self.poller = self.dispatcher.get_poll()\n\n # register the mgr socket\n self.dispatcher.fd_register(self.mgrConnection.socket, self.dispatcher.EV_FD_IN, self.host_management_cb)\n self.dispatcher.fd_register(self.mgrConnection.monitor, self.dispatcher.EV_FD_IN, self.host_management_cb)", "def register(self):\n if self.hub.is_connected:\n if self._private_key is not None:\n raise SAMPClientError(\"Client already registered\")\n\n result = self.hub.register(\"Astropy SAMP Web Client\")\n\n if result[\"samp.self-id\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.self-id was not set by the hub.\"\n )\n\n if result[\"samp.private-key\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.private-key was not set by the hub.\"\n )\n\n self._public_id = result[\"samp.self-id\"]\n self._private_key = result[\"samp.private-key\"]\n self._hub_id = result[\"samp.hub-id\"]\n\n if self._callable:\n self._declare_subscriptions()\n self.hub.allow_reverse_callbacks(self._private_key, True)\n\n if self._metadata != {}:\n self.declare_metadata()\n\n self._is_registered = True\n # Let the client thread proceed\n self._registered_event.set()\n\n else:\n raise SAMPClientError(\n \"Unable to register to the SAMP Hub. Hub proxy not connected.\"\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch the status of a registered connection
async def get_connection_status(self, connection_id: str) -> dict:
    result = await self._fetch(
        messages.ConnectionStatusReq(connection_id), messages.ConnectionStatus)
    return result.status
[ "def get_connection_status():\n command = ['ping', '-c', '2', '-I', '3g-wan', '-W', '1', '8.8.8.8']\n return run_command(command)", "def GetConnectionStatus(self):\n return [self.connection_state, self.connection_info]", "def status(self):\n connection = self.connection()\n\n # the connection instance was destroyed but someone kept\n # a separate reference to the monitor for some reason\n if not connection:\n return self.DISCONNECTED\n\n # connection cleanly disconnected or not yet opened\n if not connection.ws:\n return self.DISCONNECTED\n\n # close called but not yet complete\n if self.close_called.is_set():\n return self.DISCONNECTING\n\n # connection closed uncleanly (we didn't call connection.close)\n stopped = connection._receiver_task.stopped.is_set()\n if stopped or not connection.ws.open:\n return self.ERROR\n\n # everything is fine!\n return self.CONNECTED", "def connectivity_status(self) -> str:\n return pulumi.get(self, \"connectivity_status\")", "def status(self):\n response = requests.get(\"http://%s:%d/v1/status\" % (self.propsd_server, self.propsd_port))\n return json.loads(response.text)", "def dumpConnectionStatus():\n print(\"+++ ALL CONNECTIONS +++\")\n for connection in allInstancesOf(DiagnosticConnectionWrapper):\n print(connection.label, connection.state)\n print(\"--- CONNECTIONS END ---\")", "def get_state(self):\n if self.connected is True:\n return self.__request(\n WemoSwitch.body_status, WemoSwitch.headers_get)\n else:\n return WemoSwitch.ERROR_STATE", "def get_status(self):\r\n\r\n try:\r\n req = self.config.session.get(\r\n self.status_url, verify=self.config.verify, timeout=self.config.timeout)\r\n res = json.loads(req.text)['state']\r\n return res\r\n except requests.exceptions.RequestException as e:\r\n raise VraSdkRequestException(\r\n f'Error requesting status url {self.status_url}: {e}')\r\n except Exception as e:\r\n raise VraSdkMainRequestException(\r\n f'Unmanaged error requesting status url {self.status_url}: {e}')", "def status(self):\n \n return self._make_request(\"server/status\").json()", "def check_status(self):\n self.logger.debug('Server - td-agent-bit - check_status call.')\n self.change_service_status(\"status\")\n return self.status", "def get_status(self) -> NodeManagerStatus:", "def get_status():\n \n return db.get_db().getRoot().getS(ns.l2tpDeviceStatus, rdf.Type(ns.L2tpDeviceStatus))", "def GetStatus(self):\n self.__SendMsg(\"status\")\n ##TODO: Parse the response into some struct so it can be queried later.\n\n ## \"Status\" is the only command that returns a multi\n ## line response so handle it separately.\n response = \"\"\n while(self.SocketIsReadable()):\n data = self.my_Socket.recv(1)\n if not data:\n break\n else:\n response += data.decode(\"UTF-8\")\n return response", "def get_device_status(self):\n self.i2c_writer.write('Status\\00')\n time.sleep(0.5) # 'Status' command requires 300ms timeout for response\n return self.read()", "def new_connection_status(self, connection_status):\n print('new_connection_status', self.device.name, connection_status)", "def update_connection_status(self):\n connected = self.connected()\n if not connected:\n self.SetStatusText(\"\")\n self.set_instance_string(\"\")\n self.update_title()\n# if connecting or connected, message may vary, so don't change it here\n menu = self.get_menu_by_name(\"Connection\")\n connect_item = self.find_item_by_label(menu, \"Connect\")\n connect_item.Enable(not connected)\n connect_test_item = self.find_item_by_label(menu, \"Run regression tests\")\n 
connect_test_item.Enable(not connected)\n disconnect_item = self.find_item_by_label(menu, \"Disconnect\")\n disconnect_item.Enable(connected)", "def wifi_status(self) -> str:\n self._logger.info(\"Retrieving WiFi connection status...\")\n inverse_wifi_statuses = {v: k for k, v in self._wifi_statuses.items()}\n response = self._send(\"wlanGetConnectState\").content.decode(\"utf-8\")\n try:\n return inverse_wifi_statuses[response]\n except KeyError:\n raise linkplayctl.APIException(\"Received unrecognized wifi status: '\"+str(response)+\"'\")", "def get_status (self):\n return self.__status", "def get_network_status(self, network):\n\n with self._lock:\n with sqlite3.connect(self._database_name) as connection:\n status_query = connection.execute(f\"SELECT * from networks WHERE name='{network}'\")\n\n return status_query.fetchone()[4]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a credential request for an issuer service
async def create_credential_request(self, holder_id: str, cred_offer: dict,
                                    cred_def_id: str) -> messages.CredentialRequest:
    return await self._fetch(
        messages.GenerateCredentialRequestReq(
            holder_id, messages.CredentialOffer(cred_offer, cred_def_id)),
        messages.CredentialRequest)
[ "def req_cred(email, cred, url):\n try:\n req = url + '/accounts/service_credential_request?email=' + email\n res = requests.post(req)\n render_res(res)\n except CLIError as e:\n render_error(e.asdict())", "def _generate_credential() -> dict:\n\n return {\n \"accounts\": {}\n }", "def _makeRequest(self, resourceContentsStr, issuer):\n xacmlContextRequest = XacmlSamlPepFilter._make_xacml_context_request(\n httpMethod='POST',\n resourceURI=self.RESOURCE_URI,\n resourceContents=resourceContentsStr,\n subjectID=self.SUBJECT_ID,\n subjectIdFormat=self.SUBJECT_ID_FORMAT,\n actions=[])\n\n query = self._createAuthzDecisionQuery(issuer)\n query.xacmlContextRequest = xacmlContextRequest\n\n request = self._makeRequestForQuery(query)\n\n header = {\n 'soapAction': \"http://www.oasis-open.org/committees/security\",\n 'Content-length': str(len(request)),\n 'Content-type': 'text/xml'\n }\n return (header, request)", "def makeCertRequest(cn):\n key = KeyPair.generate()\n return key.certificateRequest(DN(CN=cn))", "def test_create_certificate_signing_request(self):\n pass", "async def create_rev_reg(request: web.BaseRequest):\n context: AdminRequestContext = request[\"context\"]\n profile = context.profile\n body = await request.json()\n\n credential_definition_id = body.get(\"credential_definition_id\")\n max_cred_num = body.get(\"max_cred_num\")\n\n # check we published this cred def\n async with profile.session() as session:\n storage = session.inject(BaseStorage)\n\n found = await storage.find_all_records(\n type_filter=CRED_DEF_SENT_RECORD_TYPE,\n tag_query={\"cred_def_id\": credential_definition_id},\n )\n if not found:\n raise web.HTTPNotFound(\n reason=f\"Not issuer of credential definition id {credential_definition_id}\"\n )\n\n try:\n revoc = IndyRevocation(profile)\n issuer_rev_reg_rec = await revoc.init_issuer_registry(\n credential_definition_id,\n max_cred_num=max_cred_num,\n notify=False,\n )\n except RevocationNotSupportedError as e:\n raise web.HTTPBadRequest(reason=e.message) from e\n await shield(issuer_rev_reg_rec.generate_registry(profile))\n\n return web.json_response({\"result\": issuer_rev_reg_rec.serialize()})", "def create_credential(platform,username,email,password):\n new_credential = Credential(platform,username,email,password)\n return new_credential", "def make_cred(**kwargs):\n\n non_pass_fields = [\n (\"Service name\", \"service_name\"),\n (\"Username\", \"username\"),\n (\"Other info\", \"other_info\"),\n ]\n\n new_kwargs = {}\n\n for name, field in non_pass_fields:\n if (field not in kwargs.keys()) or (not kwargs[field]):\n new_kwargs[field] = raw_input(\"%s: \" % name)\n else:\n new_kwargs[field] = kwargs[field]\n\n if (\"password\" not in kwargs.keys()) or (not kwargs[\"password\"]):\n prompt = \"Password for %s@%s\" \\\n % (new_kwargs['username'], new_kwargs['service_name'])\n new_kwargs[\"password\"] = InteractionUtility.new_pass_confirm(prompt)\n\n return Credential(**new_kwargs)", "def test_create_agent_certificate_signing_request(self):\n pass", "def Grant(self, request, ssl_cert=None, ssl_key=None):\n pass", "def mk_cacert(issuer, request, private_key):\n pkey = request.get_pubkey()\n cert = X509.X509()\n cert.set_serial_number(1)\n cert.set_version(2)\n mk_cert_valid(cert)\n cert.set_issuer(issuer)\n cert.set_subject(cert.get_issuer())\n cert.set_pubkey(pkey)\n cert.add_ext(X509.new_extension('basicConstraints', 'CA:TRUE'))\n cert.add_ext(X509.new_extension('subjectKeyIdentifier', cert.get_fingerprint()))\n cert.sign(private_key, 'sha256')\n return cert, 
private_key, pkey", "def generateRequest(self, username = None, password = None):\r\n uname = quote_plus(client_string(self.__configuration) + '\\\\' + username)\r\n data = 'grant_type=%s&client_id=%s&username=%s&password=%s' % (OAuth2PasswordService.GRANT_TYPE,\r\n quote_plus(application_id(self.__configuration)),\r\n uname,\r\n quote_plus(password))\r\n return make_oauth2request(OAUTH_URL, data)", "def CreateSmtpCredential(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def create_cred_req(self, cred_offer_json: str, cd_id: str, my_did: str = None) -> (str, str):\n\n LOGGER.debug(\n 'HolderProver.create_cred_req >>> cred_offer_json: %s, cd_id: %s, my_did %s',\n cred_offer_json,\n cd_id,\n my_did)\n\n if not ok_cred_def_id(cd_id):\n LOGGER.debug('HolderProver.create_cred_req <!< Bad cred def id %s', cd_id)\n raise BadIdentifier('Bad cred def id {}'.format(cd_id))\n\n if not self.wallet.handle:\n LOGGER.debug('HolderProver.create_cred_req <!< Wallet %s is closed', self.name)\n raise WalletState('Wallet {} is closed'.format(self.name))\n\n label = await self._assert_link_secret('create_cred_req')\n\n # Check that ledger has schema on ledger where cred def expects - in case of pool reset with extant wallet\n cred_def_json = await self.get_cred_def(cd_id)\n schema_seq_no = int(json.loads(cred_def_json)['schemaId'])\n schema_json = await self.get_schema(schema_seq_no)\n schema = json.loads(schema_json)\n if not schema:\n LOGGER.debug(\n 'HolderProver.create_cred_req <!< absent schema@#%s, cred req may be for another ledger',\n schema_seq_no)\n raise AbsentSchema('Absent schema@#{}, cred req may be for another ledger'.format(schema_seq_no))\n (cred_req_json, cred_req_metadata_json) = await anoncreds.prover_create_credential_req(\n self.wallet.handle,\n my_did or self.did,\n cred_offer_json,\n cred_def_json,\n label)\n rv = (cred_req_json, cred_req_metadata_json)\n\n LOGGER.debug('HolderProver.create_cred_req <<< %s', rv)\n return rv", "def create_certificate_signing_request(*props): # pylint: disable=unused-argument\n pass", "async def _create_client(self):\n frozen_credentials = (\n await self._source_credentials.get_frozen_credentials()\n )\n return self._client_creator(\n 'sts',\n aws_access_key_id=frozen_credentials.access_key,\n aws_secret_access_key=frozen_credentials.secret_key,\n aws_session_token=frozen_credentials.token,\n )", "def obtain_credential(\n pk: PublicKey,\n response: BlindSignature,\n state: RequestState\n ) -> AnonymousCredential:\n\n signature1, signature2 = jsonpickle.decode(response[0][0]), jsonpickle.decode(response[0][1])\n\n t = jsonpickle.decode(state[0])\n\n #compute final siganture with the t sampled during the issue request\n final_signature = (jsonpickle.encode(signature1)\n ,jsonpickle.encode(signature2/(signature1.pow(t))))\n\n # getting the ordered list of credentials from issuer and user attributes\n issuer_attributes = response[1]\n user_attributes = state[1]\n\n credentials_dic = dict(issuer_attributes)\n credentials_dic.update(user_attributes)\n\n #putting them in the right order (order is very important, since part of the signature on the credentials is based on it)\n credentials = []\n for i in sorted (credentials_dic.keys()):\n credentials.append(credentials_dic[i])\n\n #checking if signature is valid for these credentials\n assert verify(pk, final_signature, credentials)\n\n return credentials, final_signature", 
"def create_cred(self):\n\n cred_filename = 'resources/cred/CredFile.ini'\n\n with open(cred_filename, 'w') as file_in:\n file_in.write(\"#Credential file:\\nUsername={}\\nAPI_Key={}\\nCustomer_ID={}\\nExpiry={}\\n\"\n .format(self.__username, self.__api_key, self.__customer_ID, self.__time_of_exp))\n file_in.write(\"++\" * 20)\n\n # If there exists an older key file, This will remove it.\n if os.path.exists(self.__key_file):\n os.remove(self.__key_file)\n\n # Open the Key.key file and place the key in it.\n # The key file is hidden.\n try:\n os_type = sys.platform\n if os_type == 'linux':\n self.__key_file = '.' + self.__key_file\n\n with open(self.__key_file, 'w') as key_in:\n key_in.write(self.__key.decode())\n # Hidding the key file. The below code snippet finds out which current os the scrip is running on and\n # does the taks base on it.\n if os_type == 'win32':\n ctypes.windll.kernel32.SetFileAttributesW(self.__key_file, 2)\n else:\n pass\n\n except PermissionError:\n os.remove(self.__key_file)\n print(\"A Permission error occurred.\\n Please re run the script\")\n sys.exit()\n\n self.__username = \"\"\n self.__customer_ID = \"\"\n self.__key = \"\"\n self.__key_file", "def _make_delegated_credentials(credentials, user_email, scopes):\n request = requests.Request()\n credentials = with_scopes_if_required(credentials, _TOKEN_SCOPE)\n credentials.refresh(request)\n email = credentials.service_account_email\n signer = iam.Signer(request, credentials, email)\n return service_account.Credentials(signer,\n email,\n _TOKEN_URI,\n scopes=scopes,\n subject=user_email)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets credentials for a given organization
async def get_org_credentials(self, connection_id: str, org_name: str) -> messages.OrganizationCredentials:
    return await self._fetch(messages.OrganizationCredentialsReq(connection_id, org_name))
[ "def find_credentials(account):\n return Credentials.find_by_account(account)", "def find_credentials(account):\n\treturn Credentials.find_credentials(account)", "def _get_credentials():\n if not CONFIG:\n raise ConfigError(\"Configuration is not passed\")\n\n try:\n return CONFIG[\"credentials\"]\n except KeyError:\n raise ConfigError(\"Credentials configurations are missing from config\")", "def get_google_creds(username, widget):\n if widget == Widgets.SHIFTS:\n gcal_logger.info(\"Retrieving sys_user google creds.\")\n return session.get('sys_google_creds', None)\n else:\n gcal_logger.info(\"Retrieving connected user google creds.\")\n return session.get('google_creds', None)", "def get_credentials(cls, repo=None):\n if repo:\n _git = repo.git\n else:\n _git = Git(os.getcwd())\n return cls(\n user=_git.config('github.user', with_exceptions=False),\n token=_git.config('github.token', with_exceptions=False)\n )", "def fetch_organization(org):\n gh_inst = _get_github_instance()\n return gh_inst.get_organization(org)", "def find_credential(account):\n return Credentials.find_by_account(account)", "def get_credentials(sandbox=True):\n credentials = {\n \"sandbox\": {\n \"partner_id\": os.getenv(\"FNAC_SANDBOX_PARTNER_ID\"),\n \"shop_id\": os.getenv(\"FNAC_SANDBOX_SHOP_ID\"),\n \"key\": os.getenv(\"FNAC_SANDBOX_KEY\"),\n },\n \"real\": {\n \"partner_id\": os.getenv(\"FNAC_PARTNER_ID\"),\n \"shop_id\": os.getenv(\"FNAC_SHOP_ID\"),\n \"key\": os.getenv(\"FNAC_KEY\"),\n },\n }\n use_sandbox = {True: \"sandbox\", False: \"real\"}\n account_type = use_sandbox[sandbox]\n return credentials[account_type]", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'credentials.json')\r\n \r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "async def _get_credentials(self):\n config = Config(\n signature_version=UNSIGNED,\n region_name=self._sso_region,\n )\n async with self._client_creator('sso', config=config) as client:\n if self._token_provider:\n initial_token_data = self._token_provider.load_token()\n token = (await initial_token_data.get_frozen_token()).token\n else:\n token = self._token_loader(self._start_url)['accessToken']\n\n kwargs = {\n 'roleName': self._role_name,\n 'accountId': self._account_id,\n 'accessToken': token,\n }\n try:\n response = await client.get_role_credentials(**kwargs)\n except client.exceptions.UnauthorizedException:\n raise UnauthorizedSSOTokenError()\n credentials = response['roleCredentials']\n\n credentials = {\n 'ProviderType': 'sso',\n 'Credentials': {\n 'AccessKeyId': credentials['accessKeyId'],\n 'SecretAccessKey': credentials['secretAccessKey'],\n 'SessionToken': credentials['sessionToken'],\n 'Expiration': self._parse_timestamp(\n credentials['expiration']\n ),\n },\n }\n return credentials", "def get_creds():\n username, _, password = netrc().authenticators(\"ligo.org\")\n if not username:\n raise IOError(\"Can't find a username for ligo.org in 
~/.netrc.\")\n if not password:\n raise IOError(\"Can't find a password for ligo.org in ~/.netrc.\")\n return username, password", "async def get_filtered_credentials(self, connection_id: str, org_name: str, proof_name: str, fetch_all: bool) -> messages.OrganizationCredentials:\n return await self._fetch(messages.FilterCredentialsReq(connection_id, org_name, proof_name, fetch_all))", "def get_credentials_from_keyring(platform):\n username, password = None, None\n\n if keyring.get_keyring():\n username = keyring.get_password(platform, 'username')\n if username is not None:\n password = keyring.get_password(platform, username)\n\n if username is None or password is None:\n return None\n\n return username, password", "def _get_credentials(self):\n cred = {\n 'username': self.username,\n 'password': self.password,\n 'host': self.host,\n 'port': self.port,\n }\n return cred", "def find_credentials(platform):\n return Credential.find_by_platform(platform)", "def listCredentials():\n sql = 'SELECT mail,tel,credential FROM authorization'\n query = AppQuery()\n try:\n return query.query(sql)\n finally:\n query.close()", "def find_credentials(cls,account_type):\n for credentials in cls.credentials_list:\n if credentials.account_type == account_type:\n return credentials", "def get_credentials():\r\n home_dir = os.getcwd()\r\n credential_path = os.path.join(home_dir,\r\n 'credential.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n flags.noauth_local_webserver = True\r\n credentials = tools.run_flow(flow, store, flags)\r\n logging.info('Storing credentials to ' + credential_path)\r\n return credentials", "def _get_dcos_acs_auth_creds(username, password, hostname):\n\n if password is None:\n username, password = _get_auth_credentials(username, hostname)\n return {\"uid\": username, \"password\": password}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets credentials for a given organization and proof request
async def get_filtered_credentials(self, connection_id: str, org_name: str,
                                   proof_name: str, fetch_all: bool) -> messages.OrganizationCredentials:
    return await self._fetch(messages.FilterCredentialsReq(connection_id, org_name, proof_name, fetch_all))
[ "def extractCredentials(self, request):\n creds={}\n identity=request.form.get(\"__ac_identity_url\", \"\").strip()\n if identity != \"\":\n self.initiateChallenge(identity)\n return creds\n\n self.extractOpenIdServerResponse(request, creds)\n return creds", "def get_credentials_requests():\n gqlapi = gql.get_api()\n return gqlapi.query(CREDENTIALS_REQUESTS_QUERY)[\"credentials_requests\"]", "async def get_org_credentials(self, connection_id: str, org_name: str) -> messages.OrganizationCredentials:\n return await self._fetch(messages.OrganizationCredentialsReq(connection_id, org_name))", "def get_credentials(self, request):\n auth_header = request.headers.get('Authorization')\n\n if auth_header:\n (scheme, base64_raw) = auth_header.split(' ')\n\n if scheme == 'Basic':\n return b64decode(base64_raw).split(':')\n return (None, None)", "def get_credentials(req):\n credential_header = req.headers.get(\"RStudio-Connect-Credentials\")\n if not credential_header:\n return {}\n return json.loads(credential_header)", "def obtain_credential(\n pk: PublicKey,\n response: BlindSignature,\n state: RequestState\n ) -> AnonymousCredential:\n\n signature1, signature2 = jsonpickle.decode(response[0][0]), jsonpickle.decode(response[0][1])\n\n t = jsonpickle.decode(state[0])\n\n #compute final siganture with the t sampled during the issue request\n final_signature = (jsonpickle.encode(signature1)\n ,jsonpickle.encode(signature2/(signature1.pow(t))))\n\n # getting the ordered list of credentials from issuer and user attributes\n issuer_attributes = response[1]\n user_attributes = state[1]\n\n credentials_dic = dict(issuer_attributes)\n credentials_dic.update(user_attributes)\n\n #putting them in the right order (order is very important, since part of the signature on the credentials is based on it)\n credentials = []\n for i in sorted (credentials_dic.keys()):\n credentials.append(credentials_dic[i])\n\n #checking if signature is valid for these credentials\n assert verify(pk, final_signature, credentials)\n\n return credentials, final_signature", "def extractCredentials( self, request ):\n if not request._auth or not request._auth.startswith(self.auth_scheme):\n return None\n \n ticket = request._auth[len(self.auth_scheme)+1:]\n\n creds = {}\n creds['ticket'] = ticket\n creds['plugin'] = self.getId()\n\n return creds", "def get_credentials(sandbox=True):\n credentials = {\n \"sandbox\": {\n \"partner_id\": os.getenv(\"FNAC_SANDBOX_PARTNER_ID\"),\n \"shop_id\": os.getenv(\"FNAC_SANDBOX_SHOP_ID\"),\n \"key\": os.getenv(\"FNAC_SANDBOX_KEY\"),\n },\n \"real\": {\n \"partner_id\": os.getenv(\"FNAC_PARTNER_ID\"),\n \"shop_id\": os.getenv(\"FNAC_SHOP_ID\"),\n \"key\": os.getenv(\"FNAC_KEY\"),\n },\n }\n use_sandbox = {True: \"sandbox\", False: \"real\"}\n account_type = use_sandbox[sandbox]\n return credentials[account_type]", "def retrieve_auth():\r\n\r\n get_kwargs = lambda envargs: dict([(arg.kwarg, os.environ.get(arg.envarg))\r\n for arg in envargs])\r\n\r\n wc_kwargs = get_kwargs(wc_envargs)\r\n mm_kwargs = get_kwargs(mm_envargs)\r\n\r\n if not all([wc_kwargs[arg] for arg in ('email', 'password')]):\r\n if os.environ.get('TRAVIS'):\r\n print 'on Travis but could not read auth from environ; quitting.'\r\n sys.exit(1)\r\n\r\n wc_kwargs.update(zip(['email', 'password'], prompt_for_wc_auth()))\r\n\r\n if mm_kwargs['oauth_credentials'] is None:\r\n # ignoring race\r\n if not os.path.isfile(OAUTH_FILEPATH):\r\n raise ValueError(\"You must have oauth credentials stored at the default\"\r\n \" path by 
Musicmanager.perform_oauth prior to running.\")\r\n del mm_kwargs['oauth_credentials'] # mm default is not None\r\n else:\r\n mm_kwargs['oauth_credentials'] = \\\r\n credentials_from_refresh_token(mm_kwargs['oauth_credentials'])\r\n\r\n return (wc_kwargs, mm_kwargs)", "def getCreds():\n key = os.environ['ConsumerKey']\n secret = os.environ['ConsumerSecret']\n\n return (key, secret)", "def find_credentials(platform):\n return Credential.find_by_platform(platform)", "def _get_dcos_oauth_creds(dcos_url):\n\n oauth_login = 'login?redirect_uri=urn:ietf:wg:oauth:2.0:oob'\n url = urllib.parse.urljoin(dcos_url, oauth_login)\n msg = \"\\n{}\\n\\n {}\\n\\n{} \".format(\n \"Please go to the following link in your browser:\",\n url,\n \"Enter authentication token:\")\n sys.stderr.write(msg)\n sys.stderr.flush()\n token = sys.stdin.readline().strip()\n return {\"token\": token}", "def _generate_credential() -> dict:\n\n return {\n \"accounts\": {}\n }", "def extractCredentials(self, request):\n session = ISession(request)[SESSION_KEY]\n requestToken = session.get(REQUEST_TOKEN_KEY)\n accessToken = session.get(ACCESS_TOKEN_KEY)\n if not requestToken and not accessToken:\n return None\n return TwitterCredentials(requestToken, accessToken)", "def _get_credentials(self):\n cred = {\n 'username': self.username,\n 'password': self.password,\n 'host': self.host,\n 'port': self.port,\n }\n return cred", "async def _get_credentials(self):\n config = Config(\n signature_version=UNSIGNED,\n region_name=self._sso_region,\n )\n async with self._client_creator('sso', config=config) as client:\n if self._token_provider:\n initial_token_data = self._token_provider.load_token()\n token = (await initial_token_data.get_frozen_token()).token\n else:\n token = self._token_loader(self._start_url)['accessToken']\n\n kwargs = {\n 'roleName': self._role_name,\n 'accountId': self._account_id,\n 'accessToken': token,\n }\n try:\n response = await client.get_role_credentials(**kwargs)\n except client.exceptions.UnauthorizedException:\n raise UnauthorizedSSOTokenError()\n credentials = response['roleCredentials']\n\n credentials = {\n 'ProviderType': 'sso',\n 'Credentials': {\n 'AccessKeyId': credentials['accessKeyId'],\n 'SecretAccessKey': credentials['secretAccessKey'],\n 'SessionToken': credentials['sessionToken'],\n 'Expiration': self._parse_timestamp(\n credentials['expiration']\n ),\n },\n }\n return credentials", "def get(self, request, **kwargs):\n qbo_credentials = QBOCredential.objects.get(workspace=kwargs['workspace_id'], is_expired=False)\n\n return Response(data=QBOCredentialSerializer(qbo_credentials).data, status=status.HTTP_200_OK if qbo_credentials.refresh_token else status.HTTP_400_BAD_REQUEST)", "def find_credentials(account):\n\treturn Credentials.find_credentials(account)", "def get_authentication():\n token = load_account()\n if token is not None:\n return get_token_authentication(token)\n else:\n if QI_EMAIL is None or QI_PASSWORD is None:\n print('Enter email:')\n email = input()\n print('Enter password')\n password = getpass()\n else:\n email, password = QI_EMAIL, QI_PASSWORD\n return get_basic_authentication(email, password)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a credential's dependencies
async def get_credential_dependencies(self, name: str, version: str = None, origin_did: str = None,
                                      dependency_graph=None, visited_dids=None) -> messages.CredentialDependencies:
    return await self._fetch(
        messages.CredentialDependenciesReq(
            name, version, origin_did, dependency_graph, visited_dids))
[ "def getCreds():\n key = os.environ['ConsumerKey']\n secret = os.environ['ConsumerSecret']\n\n return (key, secret)", "def get_credentials_requests():\n gqlapi = gql.get_api()\n return gqlapi.query(CREDENTIALS_REQUESTS_QUERY)[\"credentials_requests\"]", "def getCredentials(self):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('tasks', 'v1', credentials=creds)\n return service", "def _get_credentials():\n if not CONFIG:\n raise ConfigError(\"Configuration is not passed\")\n\n try:\n return CONFIG[\"credentials\"]\n except KeyError:\n raise ConfigError(\"Credentials configurations are missing from config\")", "def _get_credentials(\n credentials_name: str, credentials: dict[str, Any]\n) -> dict[str, Any]:\n try:\n return credentials[credentials_name]\n except KeyError as exc:\n raise KeyError(\n f\"Unable to find credentials '{credentials_name}': check your data \"\n \"catalog and credentials configuration. See \"\n \"https://kedro.readthedocs.io/en/stable/kedro.io.DataCatalog.html \"\n \"for an example.\"\n ) from exc", "def credentials(self):\n ret = {k: self.AUTH_ARGS.get(k, \"\") for k in self.CRED_KEYS}\n return ret", "def find_credentials(account):\n\treturn Credentials.find_credentials(account)", "async def load_credentials(self):\n # First provider to return a non-None response wins.\n for provider in self.providers:\n logger.debug(\"Looking for credentials via: %s\", provider.METHOD)\n creds = await provider.load()\n if creds is not None:\n return creds\n\n # If we got here, no credentials could be found.\n # This feels like it should be an exception, but historically, ``None``\n # is returned.\n #\n # +1\n # -js\n return None", "def find_credentials(cls,account_type):\n for credentials in cls.credentials_list:\n if credentials.account_type == account_type:\n return credentials", "def credentials(self):\n return self._data.get('credentials')", "def get_credentials():\n credential_path = CREDENTIALS_DIR + 'google-credentials.json'\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_PATH, SCOPES)\n flow.user_agent = APPLICATION_NAME\n flags = tools.argparser.parse_args(args=[])\n credentials = tools.run_flow(flow, store, flags)\n print('Storing credentials to ' + credential_path)\n return credentials", "def find_credentials(account):\n return Credentials.find_by_account(account)", "def find_credentials(platform):\n return Credential.find_by_platform(platform)", "def _get_credentials(self):\n cred = {\n 'username': self.username,\n 'password': self.password,\n 'host': self.host,\n 'port': self.port,\n }\n return cred", "def get_credentials_module():\n import inspect\n import importlib\n import os\n\n return importlib.import_module(\n '%(module)s.configs.%(stage)s' % {\n # inspect the stack and get 
the calling module (<myproject>.settings)\n 'module': inspect.getmodule(inspect.stack()[1][0]).__name__.split('.')[0],\n 'stage': os.environ['DJANGO_CONFIGURATION']\n }\n )", "def _get_credentials(provider, credentials_file_path=None):\n # If a user provided a file, use it.\n # If not, iterate through a list of potential file locations\n # Open the file and look for the provider's specific settings.\n # If it wasn't found, move to the next file.\n # If credentials for that provider weren't found, abort, else return them.\n # TODO: Allow to pass a parser function to parse the file.\n\n credentials_file_paths = [credentials_file_path] or CREDENTIALS_FILE_PATHS\n provider_credentials = {}\n for path in credentials_file_paths:\n if os.path.isfile(path):\n credentials = _load_credentials_file()\n try:\n provider_credentials = credentials[provider]\n # Only return if provider credentials are not nothing\n if provider_credentials:\n ctx.logger.info(\n 'Credentials for {0} found under {1}'.format(\n provider, path))\n return provider_credentials\n except ValueError:\n ctx.logger.debug(\n 'Credentials for {0} were not found under {1}'.format(\n provider, path))\n return {}", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'credentials.json')\r\n \r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials():\r\n home_dir = os.getcwd()\r\n credential_path = os.path.join(home_dir,\r\n 'credential.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n flags.noauth_local_webserver = True\r\n credentials = tools.run_flow(flow, store, flags)\r\n logging.info('Storing credentials to ' + credential_path)\r\n return credentials", "def install_creds(arguments):\n\n global credentials\n if arguments.verbose:\n print \"Installing credentials...\"\n credentials = storage.get()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an endpoint for a DID
async def get_endpoint(self, did: str) -> messages.Endpoint:
    return await self._fetch(
        messages.EndpointReq(did),
        messages.Endpoint)
[ "async def get_endpoint(self, did: str) -> str:\n\n logger = logging.getLogger(__name__)\n logger.debug('_AgentCore.get_endpoint: >>> did: {}'.format(did))\n\n rv = json.dumps({})\n req_json = await ledger.build_get_attrib_request(\n self.did,\n did,\n 'endpoint',\n None,\n None)\n resp_json = await ledger.submit_request(self.pool.handle, req_json)\n await asyncio.sleep(0)\n\n resp = json.loads(resp_json)\n if ('op' in resp) and (resp['op'] == 'REQNACK'):\n logger.error('_AgentCore.get_endpoint: {}'.format(resp['reason']))\n else:\n data_json = (json.loads(resp_json))['result']['data'] # it's double-encoded on the ledger\n if data_json:\n rv = json.dumps(json.loads(data_json)['endpoint'])\n else:\n logger.info('_AgentCore.get_endpoint: ledger query returned response with no data')\n\n logger.debug('_AgentCore.get_endpoint: <<< {}'.format(rv))\n return rv", "def _get_endpoint(self, endpoint):\n url = self._url(endpoint)\n data = self._client.get(url).json()\n\n # JSON returned has a top-level key that is the name of the endpoint, so return the list under that.\n return data[endpoint]", "def get_endpoint(self, target):\n endpoint = self._parse_project(target)[1]\n try:\n return self.endpoints[endpoint]\n except KeyError:\n raise ClickException(\n \"Could not find endpoint %s. Please define it in your \"\n \"scrapinghub.yml.\" % endpoint\n )", "def test_get_endpoint(client):\n meta = load_response(client.get_endpoint).metadata\n epid = meta[\"endpoint_id\"]\n\n # load the endpoint document\n ep_doc = client.get_endpoint(epid)\n\n # check that the contents are basically OK\n assert ep_doc[\"DATA_TYPE\"] == \"endpoint\"\n assert ep_doc[\"id\"] == epid\n assert \"display_name\" in ep_doc", "def get_endpoint(self, service):\n try:\n endpoint = self.service_endpoint_map[service]\n except KeyError:\n LOG.error(\"Unknown service: %s\" % service)\n endpoint = None\n\n return endpoint", "def _get_endpoint_url(self, endpoint_name) -> str:\n ep_url = None\n for ep_dict in ENDPOINT_LIST:\n if ep_dict['endpoint_name'] == endpoint_name:\n ep_url = ep_dict['endpoint']\n return ep_url", "def getRangeGetEndpoint():\n endpoint = config.get(\"rangeget_endpoint\")\n return endpoint", "def _get_endpoint(client, **kwargs):\n return client.service_catalog.url_for(\n service_type=kwargs.get('service_type') or 'sip',\n endpoint_type=kwargs.get('endpoint_type') or 'publicURL')", "def get_url(endpoint_or_url):\n try: \n return url_for(endpoint_or_url)\n except: \n return endpoint_or_url", "def instance_endpoint(self) -> \"Endpoint\":\n ...", "def get_endpoint(self, urlname):\n #consult inner site for resources & endpoints\n #CONSIDER: parent lookups are troublesome\n if urlname not in self.endpoint_state['endpoints']:\n endpoint = self.inner_site.get_endpoint_from_urlname(urlname)\n bound_endpoint = endpoint.fork(api_request=self)\n if bound_endpoint != self.endpoint_state['endpoints'][urlname]:\n pass\n if getattr(bound_endpoint, '_parent', None):\n parent_name = bound_endpoint._parent.get_url_name()\n parent = self.inner_site.get_endpoint_from_urlname(parent_name)\n bound_endpoint._parent = parent\n return self.endpoint_state['endpoints'][urlname]", "def endpointurl(self):\n return self._endpointurl", "def endpoint(self):\n return self._idx_url", "def service_endpoint(self) -> str:\n pass", "def url_for(self, attr=None, filter_value=None,\r\n service_type=None, endpoint_type=\"publicURL\",\r\n service_name=None, volume_service_name=None):\r\n matching_endpoints = []\r\n # We don't always get a service catalog back 
...\r\n if \"serviceCatalog\" not in self.catalog[\"access\"]:\r\n return None\r\n\r\n # Full catalog ...\r\n catalog = self.catalog[\"access\"][\"serviceCatalog\"]\r\n for service in catalog:\r\n if service.get(\"type\") != service_type:\r\n continue\r\n endpoints = service[\"endpoints\"]\r\n for endpoint in endpoints:\r\n if not filter_value or endpoint.get(attr) == filter_value:\r\n endpoint[\"serviceName\"] = service.get(\"name\")\r\n matching_endpoints.append(endpoint)\r\n\r\n if not matching_endpoints:\r\n raise exc.EndpointNotFound()\r\n elif len(matching_endpoints) > 1:\r\n raise exc.AmbiguousEndpoints(endpoints=matching_endpoints)\r\n else:\r\n return matching_endpoints[0][endpoint_type]", "def get_endpoint_info(self, endpoint_id=None):\n if self.endpoint_map is None or len(list(self.endpoint_map.keys())) <= 0:\n rospy.logerr('Cannot use endpoint signals without any endpoints!')\n return\n endpoint_id = list(self.endpoint_map.keys())[0] if endpoint_id is None else endpoint_id\n return (endpoint_id, self.endpoint_map[endpoint_id])", "def _get_endpoint_id(self, name):\n return name.split(\".\")[0]", "def get_endpoint(\n project: str,\n endpoint_id: str,\n start: str = Query(default=\"now-1h\"),\n end: str = Query(default=\"now\"),\n metrics: bool = Query(default=False),\n features: bool = Query(default=False),\n):\n\n _verify_endpoint(project, endpoint_id)\n\n endpoint = _get_endpoint_kv_record_by_id(\n endpoint_id, ENDPOINT_TABLE_ATTRIBUTES_WITH_FEATURES,\n )\n\n if not endpoint:\n url = f\"/projects/{project}/model-endpoints/{endpoint_id}\"\n raise MLRunNotFoundError(f\"Endpoint {endpoint_id} not found - {url}\")\n\n endpoint_metrics = None\n if metrics:\n endpoint_metrics = _get_endpoint_metrics(\n endpoint_id=endpoint_id,\n start=start,\n end=end,\n name=[\"predictions\", \"latency\"],\n )\n\n endpoint_features = None\n if features:\n endpoint_features = _get_endpoint_features(\n project=project, endpoint_id=endpoint_id, features=endpoint.get(\"features\")\n )\n\n return ModelEndpointState(\n endpoint=ModelEndpoint(\n metadata=ModelEndpointMetadata(\n project=endpoint.get(\"project\"),\n tag=endpoint.get(\"tag\"),\n labels=json.loads(endpoint.get(\"labels\", \"\")),\n ),\n spec=ModelEndpointSpec(\n model=endpoint.get(\"model\"),\n function=endpoint.get(\"function\"),\n model_class=endpoint.get(\"model_class\"),\n ),\n status=ObjectStatus(state=\"active\"),\n ),\n first_request=endpoint.get(\"first_request\"),\n last_request=endpoint.get(\"last_request\"),\n error_count=endpoint.get(\"error_count\"),\n alert_count=endpoint.get(\"alert_count\"),\n drift_status=endpoint.get(\"drift_status\"),\n metrics=endpoint_metrics,\n features=endpoint_features,\n )", "def _get_endpoint(self, client, **kwargs):\n endpoint_kwargs = {\n 'service_type': kwargs.get('service_type') or 'volt',\n 'endpoint_type': kwargs.get('endpoint_type') or 'publicURL',\n }\n\n if kwargs.get('region_name'):\n endpoint_kwargs['attr'] = 'region'\n endpoint_kwargs['filter_value'] = kwargs.get('region_name')\n\n return client.service_catalog.url_for(**endpoint_kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct a proof from credentials in the holder's wallet given a proof request
async def construct_proof(self, holder_id: str, proof_req: dict,
                          wql_filters: dict = None,
                          cred_ids: set = None) -> messages.ConstructedProof:
    return await self._fetch(
        messages.ConstructProofReq(
            holder_id,
            messages.ProofRequest(proof_req, wql_filters), cred_ids),
        messages.ConstructedProof)
[ "async def create_proof(self, proof_req: dict, briefs: Union[dict, Sequence[dict]], requested_creds: dict) -> str:\n\n LOGGER.debug(\n 'HolderProver.create_proof >>> proof_req: %s, briefs: %s, requested_creds: %s',\n proof_req,\n briefs,\n requested_creds)\n\n if not self.wallet.handle:\n LOGGER.debug('HolderProver.create_proof <!< Wallet %s is closed', self.name)\n raise WalletState('Wallet {} is closed'.format(self.name))\n\n label = await self._assert_link_secret('create_proof')\n\n cd_ids = set()\n x_cd_ids = set()\n for brief in iter_briefs(briefs):\n cd_id = brief['cred_info']['cred_def_id']\n if cd_id in cd_ids and cd_id not in x_cd_ids:\n x_cd_ids.add(cd_id)\n cd_ids.add(cd_id)\n if x_cd_ids:\n LOGGER.debug('HolderProver.create_proof <!< briefs specification out of focus (non-uniqueness)')\n raise CredentialFocus('Briefs list repeats cred defs: {}'.format(x_cd_ids))\n\n s_id2schema = {} # schema identifier to schema\n cd_id2cred_def = {} # credential definition identifier to credential definition\n rr_id2timestamp = {} # revocation registry of interest to timestamp of interest (or None)\n rr_id2cr_id = {} # revocation registry of interest to credential revocation identifier\n for brief in iter_briefs(briefs):\n interval = brief.get('interval', None)\n cred_info = brief['cred_info']\n s_id = cred_info['schema_id']\n if not ok_schema_id(s_id):\n LOGGER.debug('HolderProver.create_proof <!< Bad schema id %s', s_id)\n raise BadIdentifier('Bad schema id {}'.format(s_id))\n\n if s_id not in s_id2schema:\n schema = json.loads(await self.get_schema(s_id)) # add to cache en passant\n if not schema:\n LOGGER.debug(\n 'HolderProver.create_proof <!< absent schema %s, proof req may be for another ledger',\n s_id)\n raise AbsentSchema('Absent schema {}, proof req may be for another ledger'.format(s_id))\n s_id2schema[s_id] = schema\n\n cd_id = cred_info['cred_def_id']\n if not ok_cred_def_id(cd_id):\n LOGGER.debug('HolderProver.create_proof <!< Bad cred def id %s', cd_id)\n raise BadIdentifier('Bad cred def id {}'.format(cd_id))\n\n if cd_id not in cd_id2cred_def:\n cred_def = json.loads(await self.get_cred_def(cd_id)) # add to cache en passant\n cd_id2cred_def[cd_id] = cred_def\n\n rr_id = cred_info['rev_reg_id']\n if rr_id:\n if not ok_rev_reg_id(rr_id):\n LOGGER.debug('HolderProver.create_proof <!< Bad rev reg id %s', rr_id)\n raise BadIdentifier('Bad rev reg id {}'.format(rr_id))\n\n await self._sync_revoc_for_proof(rr_id) # link tails file to its rr_id if it's new\n if interval:\n if rr_id not in rr_id2timestamp:\n if interval['to'] > int(time()):\n LOGGER.debug(\n 'HolderProver.create_proof <!< interval to %s for rev reg %s is in the future',\n interval['to'],\n rr_id)\n raise BadRevStateTime(\n 'Revocation registry {} timestamp {} is in the future'.format(rr_id, interval['to']))\n rr_id2timestamp[rr_id] = interval['to']\n elif 'revocation' in cd_id2cred_def[cd_id]['value']:\n LOGGER.debug(\n 'HolderProver.create_proof <!< brief on cred def id %s missing non-revocation interval',\n cd_id)\n raise AbsentInterval('Brief on cred def id {} missing non-revocation interval'.format(cd_id))\n if rr_id in rr_id2cr_id:\n continue\n rr_id2cr_id[rr_id] = cred_info['cred_rev_id']\n\n rr_id2rev_state = {} # revocation registry identifier to its state\n with REVO_CACHE.lock:\n for rr_id in rr_id2timestamp:\n revo_cache_entry = REVO_CACHE.get(rr_id, None)\n tails = revo_cache_entry.tails if revo_cache_entry else None\n if tails is None: # missing tails file\n LOGGER.debug('HolderProver.create_proof <!< 
missing tails file for rev reg id %s', rr_id)\n raise AbsentTails('Missing tails file for rev reg id {}'.format(rr_id))\n rr_def_json = await self.get_rev_reg_def(rr_id)\n (rr_delta_json, ledger_timestamp) = await revo_cache_entry.get_delta_json(\n self._build_rr_delta_json,\n rr_id2timestamp[rr_id],\n rr_id2timestamp[rr_id])\n rr_state_json = await anoncreds.create_revocation_state(\n tails.reader_handle,\n rr_def_json,\n rr_delta_json,\n ledger_timestamp,\n rr_id2cr_id[rr_id])\n rr_id2rev_state[rr_id] = {\n rr_id2timestamp[rr_id]: json.loads(rr_state_json)\n }\n\n rv = await anoncreds.prover_create_proof(\n self.wallet.handle,\n json.dumps(proof_req),\n json.dumps(requested_creds),\n label,\n json.dumps(s_id2schema),\n json.dumps(cd_id2cred_def),\n json.dumps(rr_id2rev_state))\n LOGGER.debug('HolderProver.create_proof <<< %s', rv)\n return rv", "async def request_proof(self,\n connection_id: str,\n proof_req: messages.ProofRequest,\n cred_ids: set = None,\n params: dict = None) -> messages.ConstructedProof:\n return await self._fetch(\n messages.RequestProofReq(connection_id, proof_req, cred_ids, params),\n messages.VerifiedProof)", "def genproof(publickey, data, authenticators, challenge):\n pass", "async def verify_proof(self, proof_req: dict, proof: dict) -> str:\n\n logger = logging.getLogger(__name__)\n logger.debug('Verifier.verify_proof: >>> proof_req: {}, proof: {}'.format(\n proof_req,\n proof))\n\n claims = proof['identifiers']\n uuid2schema = {}\n uuid2claim_def = {}\n for claim_uuid in claims:\n claim_s_key = schema_key_for(claims[claim_uuid]['schema_key'])\n schema = json.loads(await self.get_schema(claim_s_key))\n uuid2schema[claim_uuid] = schema\n uuid2claim_def[claim_uuid] = json.loads(await self.get_claim_def(\n schema['seqNo'],\n claims[claim_uuid]['issuer_did']))\n\n rv = json.dumps(await anoncreds.verifier_verify_proof(\n json.dumps(proof_req),\n json.dumps(proof),\n json.dumps(uuid2schema),\n json.dumps(uuid2claim_def),\n json.dumps({}))) # revoc_regs_json\n\n logger.debug('Verifier.verify_proof: <<< {}'.format(rv))\n return rv", "def __create_proof(self):\n\n # Create the block base on which the salt will be concatenated\n base_block_str = ''\n for transaction in self.__transactions:\n base_block_str += str(transaction)\n base_block_str += self.__previous_hash\n\n # Find a salt that creates the right hash\n while True:\n guess_salt = hex(self.__xorshift.getrandbits(self.proof_bitsize)).lstrip('0x')\n guess = base_block_str + guess_salt\n hash_try = self.__hash.hash(guess)\n\n if hash_try.endswith('0' * self.proof_complexity):\n self.__proof = guess_salt\n return", "async def generate_proof_request(self, spec_id: str) -> messages.ProofRequest:\n return await self._fetch(\n messages.GenerateProofRequestReq(spec_id),\n messages.ProofRequest)", "async def get_cred_briefs_by_proof_req_q(self, proof_req_json: str, x_queries_json: str = None) -> str:\n\n LOGGER.debug(\n ('HolderProver.get_cred_briefs_by_proof_req_q >>> proof_req_json: %s, x_queries_json: %s'),\n proof_req_json,\n x_queries_json)\n\n if not self.wallet.handle:\n LOGGER.debug('HolderProver.get_cred_briefs_by_proof_req_q <!< Wallet %s is closed', self.name)\n raise WalletState('Wallet {} is closed'.format(self.name))\n\n def _pred_filter(brief):\n nonlocal pred_refts\n for attr, preds in pred_refts.get(brief['cred_info']['cred_def_id'], {}).items():\n if any(Predicate.get(p[0]).value.no(brief['cred_info']['attrs'][attr], p[1]) for p in preds.values()):\n return False\n return True\n\n rv = {}\n item_refts = 
set()\n x_queries = json.loads(x_queries_json or '{}')\n for k in x_queries:\n x_queries[k] = canon_cred_wql(x_queries[k]) # indy-sdk requires attr name canonicalization\n item_refts.add(k)\n\n proof_req = json.loads(proof_req_json)\n item_refts.update(uuid for uuid in proof_req['requested_predicates'])\n if not x_queries:\n item_refts.update(uuid for uuid in proof_req['requested_attributes']) # get all req attrs if no extra wql\n handle = await anoncreds.prover_search_credentials_for_proof_req(\n self.wallet.handle,\n proof_req_json,\n json.dumps(x_queries) if x_queries else None)\n pred_refts = proof_req_pred_referents(proof_req)\n\n try:\n for item_referent in item_refts:\n count = Wallet.DEFAULT_CHUNK\n while count == Wallet.DEFAULT_CHUNK:\n fetched = json.loads(await anoncreds.prover_fetch_credentials_for_proof_req(\n handle,\n item_referent,\n Wallet.DEFAULT_CHUNK))\n count = len(fetched)\n for brief in fetched: # apply predicates from proof req here\n if brief['cred_info']['referent'] not in rv and _pred_filter(brief):\n rv[brief['cred_info']['referent']] = brief\n finally:\n await anoncreds.prover_close_credentials_search_for_proof_req(handle)\n\n rv_json = json.dumps(rv)\n LOGGER.debug('HolderProver.get_cred_briefs_by_proof_req_q <<< %s', rv_json)\n return rv_json", "def create_disclosure_proof(\n pk: PublicKey,\n credential: AnonymousCredential,\n hidden_attributes: List[Attribute],\n message: bytes\n ) -> DisclosureProof:\n nb_attr = len(credential[0])\n\n #pick random r and t in Zp\n r = G1M.order().random()\n t = G1M.order().random()\n\n creds = credential[0]\n cred_sig1, cred_sig2 = jsonpickle.decode(credential[1][0]), jsonpickle.decode(credential[1][1])\n\n #create random signature \n random_signature = (cred_sig1.pow(r), (cred_sig2 * cred_sig1.pow(t)).pow(r))\n\n #putting all hidden and disclosed attributes in a dictionarry (to know which Yi corresponds to it)\n hidden_attr_index_dic = {}\n disclosed_attr_index_dic = {}\n for i, attr in enumerate(credential[0]):\n if attr in hidden_attributes:\n hidden_attr_index_dic[i] = attr\n else:\n disclosed_attr_index_dic[i] = attr\n\n #compute the commitment using all hidden attributes\n right_side_commit = (random_signature[0].pair(jsonpickle.decode(pk[1 + nb_attr]))).pow(t)\n\n for i in hidden_attr_index_dic:\n right_side_commit = right_side_commit * ((random_signature[0].pair(jsonpickle.decode(pk[3 + nb_attr + i]))).pow(Bn.from_binary(hidden_attr_index_dic[i].encode())))\n\n #create zero knowledge proof for the showing protocol\n proof = zero_knowledge_proof_showing_protocol(t, hidden_attr_index_dic, right_side_commit, pk, random_signature, message)\n\n #encode random signature\n random_signature = (jsonpickle.encode(random_signature[0]),jsonpickle.encode(random_signature[1]))\n return jsonpickle.encode(right_side_commit), random_signature, disclosed_attr_index_dic, proof", "def generate_zkp_prover_side(\n pk: PublicKey,\n t: Bn,\n user_attributes: AttributeMap,\n commitment: G1Element) -> ProofCommit:\n\n (g, Y, _, _, _) = pk\n\n # pick random big numbers for t and for all attributes\n rnd_t = G1.order().random()\n Rnd_t = g ** rnd_t\n\n rnd_is = [(i, G1.order().random()) for i, _ in user_attributes]\n Rnd_is = [(i, Y_i ** rnd_i) for i, Y_i, rnd_i in filterY(Y, rnd_is)]\n\n # Create the challenge\n h_Rnd_t = hash_sha(Rnd_t)\n h_pk = hash_pk(pk)\n h_Rnd_is = hash_Rnd_is(Rnd_is)\n h_commit = hash_sha(commitment)\n\n challenge = Bn(abs(h_Rnd_t + h_pk + h_Rnd_is + h_commit))\n\n # Answers to challenge\n s_t = rnd_t + challenge * t\n 
s_is = [(i, rnd_i + challenge * a_i) for i, rnd_i, a_i in idx_zip(rnd_is, user_attributes)]\n\n return Rnd_t, Rnd_is, challenge, s_t, s_is", "def verifyPredicateProof(proof: PredicateProof, credDefPks, nonce,\n attrs: Dict[str, Dict[str, T]],\n revealedAttrs: Sequence[str],\n predicate: Dict[str, Sequence[str]]):\n\n Tau = []\n subProofC, subProofPredicate, C, CList = proof\n\n # Get all the random and prime numbers for verifying the proof\n c, evect, mvect, vvect, Aprime = subProofC\n alphavect, rvect, uvect = subProofPredicate\n\n Aprime, c, Tvect = getProofParams(subProofC, credDefPks, attrs,\n revealedAttrs)\n\n Tau.extend(get_values_of_dicts(Tvect))\n\n for key, val in predicate.items():\n p = credDefPks[key]\n Tval = C[key][TVAL]\n\n # Iterate over the predicates for a given credential(issuer)\n for k, value in val.items():\n\n Tdeltavect1 = (Tval[DELTA] * (p.Z ** value))\n Tdeltavect2 = (p.Z ** mvect[k]) * (p.S ** rvect[DELTA])\n Tdeltavect = (Tdeltavect1 ** (-1 * c)) * Tdeltavect2 % p.N\n\n Tuproduct = 1 % p.N\n for i in range(0, ITERATIONS):\n Tvalvect1 = (Tval[str(i)] ** (-1 * c))\n Tvalvect2 = (p.Z ** uvect[str(i)])\n Tvalvect3 = (p.S ** rvect[str(i)])\n Tau.append(Tvalvect1 * Tvalvect2 * Tvalvect3 % p.N)\n Tuproduct *= Tval[str(i)] ** uvect[str(i)]\n\n Tau.append(Tdeltavect)\n\n Qvect1 = (Tval[DELTA] ** (-1 * c))\n Qvect = Qvect1 * Tuproduct * (p.S ** alphavect) % p.N\n Tau.append(Qvect)\n\n tauAndC = reduce(lambda x, y: x + y, [Tau, CList])\n cvect = cmod.integer(get_hash(nonce, *tauAndC))\n\n return c == cvect", "def _check_proof_vs_proposal():\n proof_req = pres_ex_record.pres_request.attachment(\n IndyPresExchangeHandler.format\n )\n\n # revealed attrs\n for reft, attr_spec in proof[\"requested_proof\"][\"revealed_attrs\"].items():\n proof_req_attr_spec = proof_req[\"requested_attributes\"].get(reft)\n if not proof_req_attr_spec:\n raise V20PresFormatHandlerError(\n f\"Presentation referent {reft} not in proposal request\"\n )\n req_restrictions = proof_req_attr_spec.get(\"restrictions\", {})\n\n name = proof_req_attr_spec[\"name\"]\n proof_value = attr_spec[\"raw\"]\n sub_proof_index = attr_spec[\"sub_proof_index\"]\n schema_id = proof[\"identifiers\"][sub_proof_index][\"schema_id\"]\n cred_def_id = proof[\"identifiers\"][sub_proof_index][\"cred_def_id\"]\n criteria = {\n \"schema_id\": schema_id,\n \"schema_issuer_did\": schema_id.split(\":\")[-4],\n \"schema_name\": schema_id.split(\":\")[-2],\n \"schema_version\": schema_id.split(\":\")[-1],\n \"cred_def_id\": cred_def_id,\n \"issuer_did\": cred_def_id.split(\":\")[-5],\n f\"attr::{name}::value\": proof_value,\n }\n\n if (\n not any(r.items() <= criteria.items() for r in req_restrictions)\n and len(req_restrictions) != 0\n ):\n raise V20PresFormatHandlerError(\n f\"Presented attribute {reft} does not satisfy proof request \"\n f\"restrictions {req_restrictions}\"\n )\n\n # revealed attr groups\n for reft, attr_spec in (\n proof[\"requested_proof\"].get(\"revealed_attr_groups\", {}).items()\n ):\n proof_req_attr_spec = proof_req[\"requested_attributes\"].get(reft)\n if not proof_req_attr_spec:\n raise V20PresFormatHandlerError(\n f\"Presentation referent {reft} not in proposal request\"\n )\n req_restrictions = proof_req_attr_spec.get(\"restrictions\", {})\n proof_values = {\n name: values[\"raw\"] for name, values in attr_spec[\"values\"].items()\n }\n sub_proof_index = attr_spec[\"sub_proof_index\"]\n schema_id = proof[\"identifiers\"][sub_proof_index][\"schema_id\"]\n cred_def_id = 
proof[\"identifiers\"][sub_proof_index][\"cred_def_id\"]\n criteria = {\n \"schema_id\": schema_id,\n \"schema_issuer_did\": schema_id.split(\":\")[-4],\n \"schema_name\": schema_id.split(\":\")[-2],\n \"schema_version\": schema_id.split(\":\")[-1],\n \"cred_def_id\": cred_def_id,\n \"issuer_did\": cred_def_id.split(\":\")[-5],\n **{\n f\"attr::{name}::value\": value\n for name, value in proof_values.items()\n },\n }\n\n if (\n not any(r.items() <= criteria.items() for r in req_restrictions)\n and len(req_restrictions) != 0\n ):\n raise V20PresFormatHandlerError(\n f\"Presented attr group {reft} does not satisfy proof request \"\n f\"restrictions {req_restrictions}\"\n )\n\n # predicate bounds\n for reft, pred_spec in proof[\"requested_proof\"][\"predicates\"].items():\n proof_req_pred_spec = proof_req[\"requested_predicates\"].get(reft)\n if not proof_req_pred_spec:\n raise V20PresFormatHandlerError(\n f\"Presentation referent {reft} not in proposal request\"\n )\n req_name = proof_req_pred_spec[\"name\"]\n req_pred = Predicate.get(proof_req_pred_spec[\"p_type\"])\n req_value = proof_req_pred_spec[\"p_value\"]\n req_restrictions = proof_req_pred_spec.get(\"restrictions\", {})\n for req_restriction in req_restrictions:\n for k in list(req_restriction): # cannot modify en passant\n if k.startswith(\"attr::\"):\n req_restriction.pop(k) # let indy-sdk reject mismatch here\n sub_proof_index = pred_spec[\"sub_proof_index\"]\n for ge_proof in proof[\"proof\"][\"proofs\"][sub_proof_index][\n \"primary_proof\"\n ][\"ge_proofs\"]:\n proof_pred_spec = ge_proof[\"predicate\"]\n if proof_pred_spec[\"attr_name\"] != canon(req_name):\n continue\n if not (\n Predicate.get(proof_pred_spec[\"p_type\"]) is req_pred\n and proof_pred_spec[\"value\"] == req_value\n ):\n raise V20PresFormatHandlerError(\n f\"Presentation predicate on {req_name} \"\n \"mismatches proposal request\"\n )\n break\n else:\n raise V20PresFormatHandlerError(\n f\"Proposed request predicate on {req_name} not in presentation\"\n )\n\n schema_id = proof[\"identifiers\"][sub_proof_index][\"schema_id\"]\n cred_def_id = proof[\"identifiers\"][sub_proof_index][\"cred_def_id\"]\n criteria = {\n \"schema_id\": schema_id,\n \"schema_issuer_did\": schema_id.split(\":\")[-4],\n \"schema_name\": schema_id.split(\":\")[-2],\n \"schema_version\": schema_id.split(\":\")[-1],\n \"cred_def_id\": cred_def_id,\n \"issuer_did\": cred_def_id.split(\":\")[-5],\n }\n\n if (\n not any(r.items() <= criteria.items() for r in req_restrictions)\n and len(req_restrictions) != 0\n ):\n raise V20PresFormatHandlerError(\n f\"Presented predicate {reft} does not satisfy proof request \"\n f\"restrictions {req_restrictions}\"\n )", "async def create_bound_request(\n self,\n pres_ex_record: V20PresExRecord,\n request_data: dict = None,\n ) -> Tuple[V20PresFormat, AttachDecorator]:\n indy_proof_request = pres_ex_record.pres_proposal.attachment(\n IndyPresExchangeHandler.format\n )\n if request_data:\n indy_proof_request[\"name\"] = request_data.get(\"name\", \"proof-request\")\n indy_proof_request[\"version\"] = request_data.get(\"version\", \"1.0\")\n indy_proof_request[\"nonce\"] = (\n request_data.get(\"nonce\") or await generate_pr_nonce()\n )\n else:\n indy_proof_request[\"name\"] = \"proof-request\"\n indy_proof_request[\"version\"] = \"1.0\"\n indy_proof_request[\"nonce\"] = await generate_pr_nonce()\n return self.get_format_data(PRES_20_REQUEST, indy_proof_request)", "def create_new_verifier(u, p, pf):\n\n s = random_string(saltlen)\n n, g = pf\n v = pow(g, 
private_key(u, s, p), n)\n return (s, v)", "async def build_and_send_nym_request(pool_handle, wallet_handle, submitter_did,\n target_did, target_verkey, alias, role):\n try:\n nym_txn_req = await ledger.build_nym_request(submitter_did, target_did, target_verkey, alias, role)\n await ledger.sign_and_submit_request(pool_handle, wallet_handle, submitter_did, nym_txn_req)\n except IndyError as E:\n print(Colors.FAIL + str(E) + Colors.ENDC)\n raise", "async def create_credential_request(self, holder_id: str, cred_offer: dict,\n cred_def_id: str) -> messages.CredentialRequest:\n return await self._fetch(\n messages.GenerateCredentialRequestReq(\n holder_id,\n messages.CredentialOffer(cred_offer, cred_def_id)),\n messages.CredentialRequest)", "def obtain_credential(\n pk: PublicKey,\n response: BlindSignature,\n state: RequestState\n ) -> AnonymousCredential:\n\n signature1, signature2 = jsonpickle.decode(response[0][0]), jsonpickle.decode(response[0][1])\n\n t = jsonpickle.decode(state[0])\n\n #compute final siganture with the t sampled during the issue request\n final_signature = (jsonpickle.encode(signature1)\n ,jsonpickle.encode(signature2/(signature1.pow(t))))\n\n # getting the ordered list of credentials from issuer and user attributes\n issuer_attributes = response[1]\n user_attributes = state[1]\n\n credentials_dic = dict(issuer_attributes)\n credentials_dic.update(user_attributes)\n\n #putting them in the right order (order is very important, since part of the signature on the credentials is based on it)\n credentials = []\n for i in sorted (credentials_dic.keys()):\n credentials.append(credentials_dic[i])\n\n #checking if signature is valid for these credentials\n assert verify(pk, final_signature, credentials)\n\n return credentials, final_signature", "def create(self, req, body):\n\n LOG.debug('Create verification request body: %s', body)\n context = req.environ['karbor.context']\n context.can(verification_policy.CREATE_POLICY)\n verification = body['verification']\n LOG.debug('Create verification request : %s', verification)\n\n parameters = verification.get(\"parameters\")\n\n verification_properties = {\n 'project_id': context.project_id,\n 'provider_id': verification.get('provider_id'),\n 'checkpoint_id': verification.get('checkpoint_id'),\n 'parameters': parameters,\n 'status': constants.VERIFICATION_STATUS_IN_PROGRESS,\n }\n\n verification_obj = objects.Verification(context=context,\n **verification_properties)\n verification_obj.create()\n\n try:\n self.protection_api.verification(context, verification_obj)\n except Exception:\n update_dict = {\n \"status\": constants.VERIFICATION_STATUS_FAILURE\n }\n verification_obj = self._verification_update(\n context,\n verification_obj.get(\"id\"),\n update_dict)\n\n retval = self._view_builder.detail(req, verification_obj)\n\n return retval", "def verify_non_interactive_proof_showing_protocol(proof,pk,right_side_commit,disclosed_attributes, random_signature, message):\n nb_attr = int((len(pk) - 3) / 2)\n\n R = jsonpickle.decode(proof[0])\n sm = proof[1]\n st = jsonpickle.decode(proof[2])\n random_signature = (jsonpickle.decode(random_signature[0]),jsonpickle.decode(random_signature[1]))\n right_side_commit = jsonpickle.decode(right_side_commit)\n\n #computing challenge from all public info: public key, commitment and R, as well as message m\n #doing SHA256 hash of the concat binary of the public info\n challenge = right_side_commit.to_binary() + R.to_binary() + message\n for i in range(0,len(pk)):\n challenge = challenge + 
jsonpickle.decode(pk[i]).to_binary()\n challenge = hashlib.sha256(challenge).digest()\n #convert challenge to Bn\n challenge = Bn.from_binary(challenge)\n\n verif = right_side_commit.pow(challenge)\n for i in sm:\n verif = verif * ((random_signature[0].pair(jsonpickle.decode(pk[3 + nb_attr + i]))).pow(jsonpickle.decode(sm[i])))\n verif = verif * (random_signature[0].pair(jsonpickle.decode(pk[1 + nb_attr]))).pow(st)\n\n #need to compute left side to check if it's equal to right side commitment using the bilinear function:\n left_side = random_signature[1].pair(jsonpickle.decode(pk[1 + nb_attr]))\n for i in disclosed_attributes:\n left_side = left_side * ((random_signature[0].pair(jsonpickle.decode(pk[3 + nb_attr + i]))).pow(-Bn.from_binary(disclosed_attributes[i].encode())))\n left_side = left_side / (random_signature[0].pair(jsonpickle.decode(pk[2 + nb_attr])))\n\n #check if verif == R and if left_side == right_side_commitment\n return ((R == verif) and (left_side == right_side_commit))", "def test_state_proof_checked_in_client_request(looper, txnPoolNodeSet,\n client1, wallet1):\n request = sendRandomRequest(wallet1, client1)\n responseTimeout = waits.expectedTransactionExecutionTime(nodeCount)\n looper.run(\n eventually(check_proved_reply_received,\n client1, request.identifier, request.reqId,\n retryWait=1, timeout=responseTimeout))\n checkResponseCorrectnessFromNodes(client1.inBox, request.reqId, F)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a proof request specification
async def register_proof_spec(self, spec: dict) -> str:
    result = await self._fetch(
        messages.RegisterProofSpecReq(spec),
        messages.ProofSpecStatus)
    return result.spec_id
[ "def create_req(self):\n \n pass", "async def create_proof(self, proof_req: dict, briefs: Union[dict, Sequence[dict]], requested_creds: dict) -> str:\n\n LOGGER.debug(\n 'HolderProver.create_proof >>> proof_req: %s, briefs: %s, requested_creds: %s',\n proof_req,\n briefs,\n requested_creds)\n\n if not self.wallet.handle:\n LOGGER.debug('HolderProver.create_proof <!< Wallet %s is closed', self.name)\n raise WalletState('Wallet {} is closed'.format(self.name))\n\n label = await self._assert_link_secret('create_proof')\n\n cd_ids = set()\n x_cd_ids = set()\n for brief in iter_briefs(briefs):\n cd_id = brief['cred_info']['cred_def_id']\n if cd_id in cd_ids and cd_id not in x_cd_ids:\n x_cd_ids.add(cd_id)\n cd_ids.add(cd_id)\n if x_cd_ids:\n LOGGER.debug('HolderProver.create_proof <!< briefs specification out of focus (non-uniqueness)')\n raise CredentialFocus('Briefs list repeats cred defs: {}'.format(x_cd_ids))\n\n s_id2schema = {} # schema identifier to schema\n cd_id2cred_def = {} # credential definition identifier to credential definition\n rr_id2timestamp = {} # revocation registry of interest to timestamp of interest (or None)\n rr_id2cr_id = {} # revocation registry of interest to credential revocation identifier\n for brief in iter_briefs(briefs):\n interval = brief.get('interval', None)\n cred_info = brief['cred_info']\n s_id = cred_info['schema_id']\n if not ok_schema_id(s_id):\n LOGGER.debug('HolderProver.create_proof <!< Bad schema id %s', s_id)\n raise BadIdentifier('Bad schema id {}'.format(s_id))\n\n if s_id not in s_id2schema:\n schema = json.loads(await self.get_schema(s_id)) # add to cache en passant\n if not schema:\n LOGGER.debug(\n 'HolderProver.create_proof <!< absent schema %s, proof req may be for another ledger',\n s_id)\n raise AbsentSchema('Absent schema {}, proof req may be for another ledger'.format(s_id))\n s_id2schema[s_id] = schema\n\n cd_id = cred_info['cred_def_id']\n if not ok_cred_def_id(cd_id):\n LOGGER.debug('HolderProver.create_proof <!< Bad cred def id %s', cd_id)\n raise BadIdentifier('Bad cred def id {}'.format(cd_id))\n\n if cd_id not in cd_id2cred_def:\n cred_def = json.loads(await self.get_cred_def(cd_id)) # add to cache en passant\n cd_id2cred_def[cd_id] = cred_def\n\n rr_id = cred_info['rev_reg_id']\n if rr_id:\n if not ok_rev_reg_id(rr_id):\n LOGGER.debug('HolderProver.create_proof <!< Bad rev reg id %s', rr_id)\n raise BadIdentifier('Bad rev reg id {}'.format(rr_id))\n\n await self._sync_revoc_for_proof(rr_id) # link tails file to its rr_id if it's new\n if interval:\n if rr_id not in rr_id2timestamp:\n if interval['to'] > int(time()):\n LOGGER.debug(\n 'HolderProver.create_proof <!< interval to %s for rev reg %s is in the future',\n interval['to'],\n rr_id)\n raise BadRevStateTime(\n 'Revocation registry {} timestamp {} is in the future'.format(rr_id, interval['to']))\n rr_id2timestamp[rr_id] = interval['to']\n elif 'revocation' in cd_id2cred_def[cd_id]['value']:\n LOGGER.debug(\n 'HolderProver.create_proof <!< brief on cred def id %s missing non-revocation interval',\n cd_id)\n raise AbsentInterval('Brief on cred def id {} missing non-revocation interval'.format(cd_id))\n if rr_id in rr_id2cr_id:\n continue\n rr_id2cr_id[rr_id] = cred_info['cred_rev_id']\n\n rr_id2rev_state = {} # revocation registry identifier to its state\n with REVO_CACHE.lock:\n for rr_id in rr_id2timestamp:\n revo_cache_entry = REVO_CACHE.get(rr_id, None)\n tails = revo_cache_entry.tails if revo_cache_entry else None\n if tails is None: # missing tails file\n 
LOGGER.debug('HolderProver.create_proof <!< missing tails file for rev reg id %s', rr_id)\n raise AbsentTails('Missing tails file for rev reg id {}'.format(rr_id))\n rr_def_json = await self.get_rev_reg_def(rr_id)\n (rr_delta_json, ledger_timestamp) = await revo_cache_entry.get_delta_json(\n self._build_rr_delta_json,\n rr_id2timestamp[rr_id],\n rr_id2timestamp[rr_id])\n rr_state_json = await anoncreds.create_revocation_state(\n tails.reader_handle,\n rr_def_json,\n rr_delta_json,\n ledger_timestamp,\n rr_id2cr_id[rr_id])\n rr_id2rev_state[rr_id] = {\n rr_id2timestamp[rr_id]: json.loads(rr_state_json)\n }\n\n rv = await anoncreds.prover_create_proof(\n self.wallet.handle,\n json.dumps(proof_req),\n json.dumps(requested_creds),\n label,\n json.dumps(s_id2schema),\n json.dumps(cd_id2cred_def),\n json.dumps(rr_id2rev_state))\n LOGGER.debug('HolderProver.create_proof <<< %s', rv)\n return rv", "def test_add_specification(self):\n artifact_id = self.my_create_appliance(\"testspecification\")\n s.touch_to_add_specification(artifact_id,2,4)\n cores, ram = s.get_latest_specification(artifact_id)\n self.assertEqual(cores, 2)\n self.assertEqual(ram, 4)", "async def generate_proof_request(self, spec_id: str) -> messages.ProofRequest:\n return await self._fetch(\n messages.GenerateProofRequestReq(spec_id),\n messages.ProofRequest)", "async def request_proof(self,\n connection_id: str,\n proof_req: messages.ProofRequest,\n cred_ids: set = None,\n params: dict = None) -> messages.ConstructedProof:\n return await self._fetch(\n messages.RequestProofReq(connection_id, proof_req, cred_ids, params),\n messages.VerifiedProof)", "def accept_qualification_request(QualificationRequestId=None, IntegerValue=None):\n pass", "def __init__(__self__, *,\n name: pulumi.Input[str],\n parameters: pulumi.Input['RequestSchemeMatchConditionParametersArgs']):\n pulumi.set(__self__, \"name\", 'RequestScheme')\n pulumi.set(__self__, \"parameters\", parameters)", "def define(cls, spec):\n super().define(spec)\n spec.inputs['spin_type'].valid_type = ChoiceType(tuple(SpinType))\n spec.inputs['relax_type'].valid_type = ChoiceType([\n t for t in RelaxType if t not in (RelaxType.VOLUME, RelaxType.SHAPE, RelaxType.CELL)\n ])\n spec.inputs['electronic_type'].valid_type = ChoiceType(\n (ElectronicType.METAL, ElectronicType.INSULATOR, ElectronicType.UNKNOWN)\n )\n spec.inputs['engines']['relax']['code'].valid_type = CodeType('abinit')\n spec.inputs['protocol'].valid_type = ChoiceType(('fast', 'moderate', 'precise', 'verification-pbe-v1'))", "def add_requirements(obj, **kw):\n new = dict()\n new.update(obj.required)\n new.update(kw)\n obj.required = new", "async def verify_proof(self, proof_req: dict, proof: dict) -> str:\n\n logger = logging.getLogger(__name__)\n logger.debug('Verifier.verify_proof: >>> proof_req: {}, proof: {}'.format(\n proof_req,\n proof))\n\n claims = proof['identifiers']\n uuid2schema = {}\n uuid2claim_def = {}\n for claim_uuid in claims:\n claim_s_key = schema_key_for(claims[claim_uuid]['schema_key'])\n schema = json.loads(await self.get_schema(claim_s_key))\n uuid2schema[claim_uuid] = schema\n uuid2claim_def[claim_uuid] = json.loads(await self.get_claim_def(\n schema['seqNo'],\n claims[claim_uuid]['issuer_did']))\n\n rv = json.dumps(await anoncreds.verifier_verify_proof(\n json.dumps(proof_req),\n json.dumps(proof),\n json.dumps(uuid2schema),\n json.dumps(uuid2claim_def),\n json.dumps({}))) # revoc_regs_json\n\n logger.debug('Verifier.verify_proof: <<< {}'.format(rv))\n return rv", "def 
_construct_input_spec(self):", "def test_register_route_request(self):\n pass", "def create(self, req, body):\n\n LOG.debug('Create verification request body: %s', body)\n context = req.environ['karbor.context']\n context.can(verification_policy.CREATE_POLICY)\n verification = body['verification']\n LOG.debug('Create verification request : %s', verification)\n\n parameters = verification.get(\"parameters\")\n\n verification_properties = {\n 'project_id': context.project_id,\n 'provider_id': verification.get('provider_id'),\n 'checkpoint_id': verification.get('checkpoint_id'),\n 'parameters': parameters,\n 'status': constants.VERIFICATION_STATUS_IN_PROGRESS,\n }\n\n verification_obj = objects.Verification(context=context,\n **verification_properties)\n verification_obj.create()\n\n try:\n self.protection_api.verification(context, verification_obj)\n except Exception:\n update_dict = {\n \"status\": constants.VERIFICATION_STATUS_FAILURE\n }\n verification_obj = self._verification_update(\n context,\n verification_obj.get(\"id\"),\n update_dict)\n\n retval = self._view_builder.detail(req, verification_obj)\n\n return retval", "def test_conformance(self):\n self._request_valid(\"conformance\")", "def register_spec(cls, spec):\n spec = to_spec(spec)\n _SPEC_REGISTRY[cls] = spec\n return spec", "async def construct_proof(self, holder_id: str, proof_req: dict,\n wql_filters: dict = None,\n cred_ids: set = None) -> messages.ConstructedProof:\n return await self._fetch(\n messages.ConstructProofReq(\n holder_id,\n messages.ProofRequest(proof_req, wql_filters), cred_ids),\n messages.ConstructedProof)", "def request_add(self, r, form):\n\n if r in self._add_promises or r in self._del_promises:\n msg = \"Rule {} already registered for a promised update.\"\n raise ValueError(msg.format(r))\n else:\n self._add_promises[r] = form", "def test_signrequest_quick_create_create(self):\n pass", "def _constraints_for_new_request(cls, config):\n return {'count': npr.randint(5, 20, 1)[0]}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a proof request based on a previously registered proof request spec
async def generate_proof_request(self, spec_id: str) -> messages.ProofRequest: return await self._fetch( messages.GenerateProofRequestReq(spec_id), messages.ProofRequest)
[ "async def create_proof(self, proof_req: dict, briefs: Union[dict, Sequence[dict]], requested_creds: dict) -> str:\n\n LOGGER.debug(\n 'HolderProver.create_proof >>> proof_req: %s, briefs: %s, requested_creds: %s',\n proof_req,\n briefs,\n requested_creds)\n\n if not self.wallet.handle:\n LOGGER.debug('HolderProver.create_proof <!< Wallet %s is closed', self.name)\n raise WalletState('Wallet {} is closed'.format(self.name))\n\n label = await self._assert_link_secret('create_proof')\n\n cd_ids = set()\n x_cd_ids = set()\n for brief in iter_briefs(briefs):\n cd_id = brief['cred_info']['cred_def_id']\n if cd_id in cd_ids and cd_id not in x_cd_ids:\n x_cd_ids.add(cd_id)\n cd_ids.add(cd_id)\n if x_cd_ids:\n LOGGER.debug('HolderProver.create_proof <!< briefs specification out of focus (non-uniqueness)')\n raise CredentialFocus('Briefs list repeats cred defs: {}'.format(x_cd_ids))\n\n s_id2schema = {} # schema identifier to schema\n cd_id2cred_def = {} # credential definition identifier to credential definition\n rr_id2timestamp = {} # revocation registry of interest to timestamp of interest (or None)\n rr_id2cr_id = {} # revocation registry of interest to credential revocation identifier\n for brief in iter_briefs(briefs):\n interval = brief.get('interval', None)\n cred_info = brief['cred_info']\n s_id = cred_info['schema_id']\n if not ok_schema_id(s_id):\n LOGGER.debug('HolderProver.create_proof <!< Bad schema id %s', s_id)\n raise BadIdentifier('Bad schema id {}'.format(s_id))\n\n if s_id not in s_id2schema:\n schema = json.loads(await self.get_schema(s_id)) # add to cache en passant\n if not schema:\n LOGGER.debug(\n 'HolderProver.create_proof <!< absent schema %s, proof req may be for another ledger',\n s_id)\n raise AbsentSchema('Absent schema {}, proof req may be for another ledger'.format(s_id))\n s_id2schema[s_id] = schema\n\n cd_id = cred_info['cred_def_id']\n if not ok_cred_def_id(cd_id):\n LOGGER.debug('HolderProver.create_proof <!< Bad cred def id %s', cd_id)\n raise BadIdentifier('Bad cred def id {}'.format(cd_id))\n\n if cd_id not in cd_id2cred_def:\n cred_def = json.loads(await self.get_cred_def(cd_id)) # add to cache en passant\n cd_id2cred_def[cd_id] = cred_def\n\n rr_id = cred_info['rev_reg_id']\n if rr_id:\n if not ok_rev_reg_id(rr_id):\n LOGGER.debug('HolderProver.create_proof <!< Bad rev reg id %s', rr_id)\n raise BadIdentifier('Bad rev reg id {}'.format(rr_id))\n\n await self._sync_revoc_for_proof(rr_id) # link tails file to its rr_id if it's new\n if interval:\n if rr_id not in rr_id2timestamp:\n if interval['to'] > int(time()):\n LOGGER.debug(\n 'HolderProver.create_proof <!< interval to %s for rev reg %s is in the future',\n interval['to'],\n rr_id)\n raise BadRevStateTime(\n 'Revocation registry {} timestamp {} is in the future'.format(rr_id, interval['to']))\n rr_id2timestamp[rr_id] = interval['to']\n elif 'revocation' in cd_id2cred_def[cd_id]['value']:\n LOGGER.debug(\n 'HolderProver.create_proof <!< brief on cred def id %s missing non-revocation interval',\n cd_id)\n raise AbsentInterval('Brief on cred def id {} missing non-revocation interval'.format(cd_id))\n if rr_id in rr_id2cr_id:\n continue\n rr_id2cr_id[rr_id] = cred_info['cred_rev_id']\n\n rr_id2rev_state = {} # revocation registry identifier to its state\n with REVO_CACHE.lock:\n for rr_id in rr_id2timestamp:\n revo_cache_entry = REVO_CACHE.get(rr_id, None)\n tails = revo_cache_entry.tails if revo_cache_entry else None\n if tails is None: # missing tails file\n LOGGER.debug('HolderProver.create_proof <!< 
missing tails file for rev reg id %s', rr_id)\n raise AbsentTails('Missing tails file for rev reg id {}'.format(rr_id))\n rr_def_json = await self.get_rev_reg_def(rr_id)\n (rr_delta_json, ledger_timestamp) = await revo_cache_entry.get_delta_json(\n self._build_rr_delta_json,\n rr_id2timestamp[rr_id],\n rr_id2timestamp[rr_id])\n rr_state_json = await anoncreds.create_revocation_state(\n tails.reader_handle,\n rr_def_json,\n rr_delta_json,\n ledger_timestamp,\n rr_id2cr_id[rr_id])\n rr_id2rev_state[rr_id] = {\n rr_id2timestamp[rr_id]: json.loads(rr_state_json)\n }\n\n rv = await anoncreds.prover_create_proof(\n self.wallet.handle,\n json.dumps(proof_req),\n json.dumps(requested_creds),\n label,\n json.dumps(s_id2schema),\n json.dumps(cd_id2cred_def),\n json.dumps(rr_id2rev_state))\n LOGGER.debug('HolderProver.create_proof <<< %s', rv)\n return rv", "async def request_proof(self,\n connection_id: str,\n proof_req: messages.ProofRequest,\n cred_ids: set = None,\n params: dict = None) -> messages.ConstructedProof:\n return await self._fetch(\n messages.RequestProofReq(connection_id, proof_req, cred_ids, params),\n messages.VerifiedProof)", "def genproof(publickey, data, authenticators, challenge):\n pass", "async def construct_proof(self, holder_id: str, proof_req: dict,\n wql_filters: dict = None,\n cred_ids: set = None) -> messages.ConstructedProof:\n return await self._fetch(\n messages.ConstructProofReq(\n holder_id,\n messages.ProofRequest(proof_req, wql_filters), cred_ids),\n messages.ConstructedProof)", "async def verify_proof(self, proof_req: dict, proof: dict) -> str:\n\n logger = logging.getLogger(__name__)\n logger.debug('Verifier.verify_proof: >>> proof_req: {}, proof: {}'.format(\n proof_req,\n proof))\n\n claims = proof['identifiers']\n uuid2schema = {}\n uuid2claim_def = {}\n for claim_uuid in claims:\n claim_s_key = schema_key_for(claims[claim_uuid]['schema_key'])\n schema = json.loads(await self.get_schema(claim_s_key))\n uuid2schema[claim_uuid] = schema\n uuid2claim_def[claim_uuid] = json.loads(await self.get_claim_def(\n schema['seqNo'],\n claims[claim_uuid]['issuer_did']))\n\n rv = json.dumps(await anoncreds.verifier_verify_proof(\n json.dumps(proof_req),\n json.dumps(proof),\n json.dumps(uuid2schema),\n json.dumps(uuid2claim_def),\n json.dumps({}))) # revoc_regs_json\n\n logger.debug('Verifier.verify_proof: <<< {}'.format(rv))\n return rv", "def _check_proof_vs_proposal():\n proof_req = pres_ex_record.pres_request.attachment(\n IndyPresExchangeHandler.format\n )\n\n # revealed attrs\n for reft, attr_spec in proof[\"requested_proof\"][\"revealed_attrs\"].items():\n proof_req_attr_spec = proof_req[\"requested_attributes\"].get(reft)\n if not proof_req_attr_spec:\n raise V20PresFormatHandlerError(\n f\"Presentation referent {reft} not in proposal request\"\n )\n req_restrictions = proof_req_attr_spec.get(\"restrictions\", {})\n\n name = proof_req_attr_spec[\"name\"]\n proof_value = attr_spec[\"raw\"]\n sub_proof_index = attr_spec[\"sub_proof_index\"]\n schema_id = proof[\"identifiers\"][sub_proof_index][\"schema_id\"]\n cred_def_id = proof[\"identifiers\"][sub_proof_index][\"cred_def_id\"]\n criteria = {\n \"schema_id\": schema_id,\n \"schema_issuer_did\": schema_id.split(\":\")[-4],\n \"schema_name\": schema_id.split(\":\")[-2],\n \"schema_version\": schema_id.split(\":\")[-1],\n \"cred_def_id\": cred_def_id,\n \"issuer_did\": cred_def_id.split(\":\")[-5],\n f\"attr::{name}::value\": proof_value,\n }\n\n if (\n not any(r.items() <= criteria.items() for r in 
req_restrictions)\n and len(req_restrictions) != 0\n ):\n raise V20PresFormatHandlerError(\n f\"Presented attribute {reft} does not satisfy proof request \"\n f\"restrictions {req_restrictions}\"\n )\n\n # revealed attr groups\n for reft, attr_spec in (\n proof[\"requested_proof\"].get(\"revealed_attr_groups\", {}).items()\n ):\n proof_req_attr_spec = proof_req[\"requested_attributes\"].get(reft)\n if not proof_req_attr_spec:\n raise V20PresFormatHandlerError(\n f\"Presentation referent {reft} not in proposal request\"\n )\n req_restrictions = proof_req_attr_spec.get(\"restrictions\", {})\n proof_values = {\n name: values[\"raw\"] for name, values in attr_spec[\"values\"].items()\n }\n sub_proof_index = attr_spec[\"sub_proof_index\"]\n schema_id = proof[\"identifiers\"][sub_proof_index][\"schema_id\"]\n cred_def_id = proof[\"identifiers\"][sub_proof_index][\"cred_def_id\"]\n criteria = {\n \"schema_id\": schema_id,\n \"schema_issuer_did\": schema_id.split(\":\")[-4],\n \"schema_name\": schema_id.split(\":\")[-2],\n \"schema_version\": schema_id.split(\":\")[-1],\n \"cred_def_id\": cred_def_id,\n \"issuer_did\": cred_def_id.split(\":\")[-5],\n **{\n f\"attr::{name}::value\": value\n for name, value in proof_values.items()\n },\n }\n\n if (\n not any(r.items() <= criteria.items() for r in req_restrictions)\n and len(req_restrictions) != 0\n ):\n raise V20PresFormatHandlerError(\n f\"Presented attr group {reft} does not satisfy proof request \"\n f\"restrictions {req_restrictions}\"\n )\n\n # predicate bounds\n for reft, pred_spec in proof[\"requested_proof\"][\"predicates\"].items():\n proof_req_pred_spec = proof_req[\"requested_predicates\"].get(reft)\n if not proof_req_pred_spec:\n raise V20PresFormatHandlerError(\n f\"Presentation referent {reft} not in proposal request\"\n )\n req_name = proof_req_pred_spec[\"name\"]\n req_pred = Predicate.get(proof_req_pred_spec[\"p_type\"])\n req_value = proof_req_pred_spec[\"p_value\"]\n req_restrictions = proof_req_pred_spec.get(\"restrictions\", {})\n for req_restriction in req_restrictions:\n for k in list(req_restriction): # cannot modify en passant\n if k.startswith(\"attr::\"):\n req_restriction.pop(k) # let indy-sdk reject mismatch here\n sub_proof_index = pred_spec[\"sub_proof_index\"]\n for ge_proof in proof[\"proof\"][\"proofs\"][sub_proof_index][\n \"primary_proof\"\n ][\"ge_proofs\"]:\n proof_pred_spec = ge_proof[\"predicate\"]\n if proof_pred_spec[\"attr_name\"] != canon(req_name):\n continue\n if not (\n Predicate.get(proof_pred_spec[\"p_type\"]) is req_pred\n and proof_pred_spec[\"value\"] == req_value\n ):\n raise V20PresFormatHandlerError(\n f\"Presentation predicate on {req_name} \"\n \"mismatches proposal request\"\n )\n break\n else:\n raise V20PresFormatHandlerError(\n f\"Proposed request predicate on {req_name} not in presentation\"\n )\n\n schema_id = proof[\"identifiers\"][sub_proof_index][\"schema_id\"]\n cred_def_id = proof[\"identifiers\"][sub_proof_index][\"cred_def_id\"]\n criteria = {\n \"schema_id\": schema_id,\n \"schema_issuer_did\": schema_id.split(\":\")[-4],\n \"schema_name\": schema_id.split(\":\")[-2],\n \"schema_version\": schema_id.split(\":\")[-1],\n \"cred_def_id\": cred_def_id,\n \"issuer_did\": cred_def_id.split(\":\")[-5],\n }\n\n if (\n not any(r.items() <= criteria.items() for r in req_restrictions)\n and len(req_restrictions) != 0\n ):\n raise V20PresFormatHandlerError(\n f\"Presented predicate {reft} does not satisfy proof request \"\n f\"restrictions {req_restrictions}\"\n )", "def create_req(self):\n 
\n pass", "async def build_proof_req_json(self, cd_id2spec: dict, cache_only: bool = False) -> str:\n\n LOGGER.debug('HolderProver.build_proof_req_json >>> cd_id2spec: %s, cache_only: %s', cd_id2spec, cache_only)\n\n cd_id2schema = {}\n now = int(time())\n proof_req = {\n 'nonce': str(int(time())),\n 'name': 'proof_req',\n 'version': '0.0',\n 'requested_attributes': {},\n 'requested_predicates': {}\n }\n\n for cd_id in cd_id2spec:\n interval = None\n cred_def = json.loads(await self.get_cred_def(cd_id))\n seq_no = cred_def_id2seq_no(cd_id)\n cd_id2schema[cd_id] = json.loads(await self.get_schema(seq_no))\n\n if 'revocation' in cred_def['value']:\n if cache_only and not (cd_id2spec.get(cd_id, {}) or {}).get('interval', None):\n with REVO_CACHE.lock:\n (fro, to) = REVO_CACHE.dflt_interval(cd_id)\n if not (fro and to):\n LOGGER.debug(\n 'HolderProver.build_proof_req_json: <!< no cached delta for non-revoc interval on %s',\n cd_id)\n raise AbsentInterval('No cached delta for non-revoc interval on {}'.format(cd_id))\n interval = {\n 'from': fro,\n 'to': to\n }\n else:\n fro_to = cd_id2spec[cd_id].get('interval', (now, now)) if cd_id2spec[cd_id] else (now, now)\n interval = {\n 'from': fro_to if isinstance(fro_to, int) else min(fro_to),\n 'to': fro_to if isinstance(fro_to, int) else max(fro_to)\n }\n\n for attr in (cd_id2spec[cd_id].get('attrs', cd_id2schema[cd_id]['attrNames']) or []\n if cd_id2spec[cd_id] else cd_id2schema[cd_id]['attrNames']):\n attr_uuid = '{}_{}_uuid'.format(seq_no, attr)\n proof_req['requested_attributes'][attr_uuid] = {\n 'name': attr,\n 'restrictions': [{\n 'cred_def_id': cd_id\n }]\n }\n if interval:\n proof_req['requested_attributes'][attr_uuid]['non_revoked'] = interval\n\n for attr in (cd_id2spec[cd_id].get('minima', {}) or {} if cd_id2spec[cd_id] else {}):\n pred_uuid = '{}_{}_uuid'.format(seq_no, attr)\n try:\n proof_req['requested_predicates'][pred_uuid] = {\n 'name': attr,\n 'p_type': '>=',\n 'p_value': int(cd_id2spec[cd_id]['minima'][attr]),\n 'restrictions': [{\n 'cred_def_id': cd_id\n }]\n }\n except ValueError:\n LOGGER.info(\n 'cannot build predicate on non-int minimum %s for %s',\n cd_id2spec[cd_id]['minima'][attr],\n attr)\n continue # int conversion failed - reject candidate\n if interval:\n proof_req['requested_predicates'][pred_uuid]['non_revoked'] = interval\n\n rv_json = json.dumps(proof_req)\n LOGGER.debug('HolderProver.build_proof_req_json <<< %s', rv_json)\n return rv_json", "async def create_bound_request(\n self,\n pres_ex_record: V20PresExRecord,\n request_data: dict = None,\n ) -> Tuple[V20PresFormat, AttachDecorator]:\n indy_proof_request = pres_ex_record.pres_proposal.attachment(\n IndyPresExchangeHandler.format\n )\n if request_data:\n indy_proof_request[\"name\"] = request_data.get(\"name\", \"proof-request\")\n indy_proof_request[\"version\"] = request_data.get(\"version\", \"1.0\")\n indy_proof_request[\"nonce\"] = (\n request_data.get(\"nonce\") or await generate_pr_nonce()\n )\n else:\n indy_proof_request[\"name\"] = \"proof-request\"\n indy_proof_request[\"version\"] = \"1.0\"\n indy_proof_request[\"nonce\"] = await generate_pr_nonce()\n return self.get_format_data(PRES_20_REQUEST, indy_proof_request)", "async def register_proof_spec(self, spec: dict) -> str:\n result = await self._fetch(\n messages.RegisterProofSpecReq(spec),\n messages.ProofSpecStatus)\n return result.spec_id", "def generate_confirmation_request(self):\n # () -> (str)\n chosen_template = choose(self.template_msgs) # will never return `None`\n return 
chosen_template.generate(self.context,\n {\"intent-summary\":\n self.intent_to_confirm[\"summary\"]})", "def gen_proof(self) -> Proof:\n assert not self.current_goals, \"non empty goal stack\"\n init_goal = self.get_goal_by_id(0)\n return self.gen_proof_for_goal(init_goal)", "def test_11_generate_requantizer_gains_corrections():\n\tcasalog.origin(\"test_11_generate_requantizer_gains_corrections\")\n\tcasalog.post(\"starting\")\n\n\tgencal('G192_flagged_6s.ms', caltable='calG192.requantizer', \\\n\t caltype='rq')", "def _get_inference_request(self, inputs, outputs, model_name, model_version,\n request_id, sequence_id):\n\n self._request = grpc_service_v2_pb2.ModelInferRequest()\n self._request.model_name = model_name\n self._request.model_version = model_version\n if request_id != None:\n self._request.id = request_id\n if sequence_id != None:\n self._request.sequence_id = sequence_id\n for infer_input in inputs:\n self._request.inputs.extend([infer_input._get_tensor()])\n for infer_output in outputs:\n self._request.outputs.extend([infer_output._get_tensor()])", "def make_request(req_type, what, details, ver=\"1.1\"):\n NL = \"\\r\\n\"\n req_line = \"{verb} {w} HTTP/{v}\".format(verb=req_type, w=what, v=ver)\n details = [\"{name}: {v}\".format(name=n,v=v) for (n,v) in details.iteritems()]\n detail_lines = NL.join(details)\n full_request = \"\".join([req_line, NL, detail_lines, NL, NL])\n return full_request", "def generate(reqs):\n\n # FIME(aloga): really simplistic\n hosts = []\n for i in xrange(50):\n hosts.append(simulator.hosts.Host(\"node-%02d\" % i,\n 2,\n 32 * 1024,\n 200 * 1024 * 1024))\n MANAGER.add_hosts(hosts)\n\n for req in reqs:\n if req[\"start\"] != 0 and req[\"start\"] < req[\"submit\"]:\n print \"discarding req %s\" % req[\"id\"]\n continue\n\n # FIXME(aloga). we should make this configurable. 
Or even adjust the\n # request to the available flavors.\n job_store = simpy.Store(ENV, capacity=1000)\n r = simulator.requests.Request(req, job_store)\n ENV.process(r.do())\n yield ENV.timeout(0)", "def create(self, req, body):\n\n LOG.debug('Create verification request body: %s', body)\n context = req.environ['karbor.context']\n context.can(verification_policy.CREATE_POLICY)\n verification = body['verification']\n LOG.debug('Create verification request : %s', verification)\n\n parameters = verification.get(\"parameters\")\n\n verification_properties = {\n 'project_id': context.project_id,\n 'provider_id': verification.get('provider_id'),\n 'checkpoint_id': verification.get('checkpoint_id'),\n 'parameters': parameters,\n 'status': constants.VERIFICATION_STATUS_IN_PROGRESS,\n }\n\n verification_obj = objects.Verification(context=context,\n **verification_properties)\n verification_obj.create()\n\n try:\n self.protection_api.verification(context, verification_obj)\n except Exception:\n update_dict = {\n \"status\": constants.VERIFICATION_STATUS_FAILURE\n }\n verification_obj = self._verification_update(\n context,\n verification_obj.get(\"id\"),\n update_dict)\n\n retval = self._view_builder.detail(req, verification_obj)\n\n return retval", "def __create_proof(self):\n\n # Create the block base on which the salt will be concatenated\n base_block_str = ''\n for transaction in self.__transactions:\n base_block_str += str(transaction)\n base_block_str += self.__previous_hash\n\n # Find a salt that creates the right hash\n while True:\n guess_salt = hex(self.__xorshift.getrandbits(self.proof_bitsize)).lstrip('0x')\n guess = base_block_str + guess_salt\n hash_try = self.__hash.hash(guess)\n\n if hash_try.endswith('0' * self.proof_complexity):\n self.__proof = guess_salt\n return", "async def get_cred_briefs_by_proof_req_q(self, proof_req_json: str, x_queries_json: str = None) -> str:\n\n LOGGER.debug(\n ('HolderProver.get_cred_briefs_by_proof_req_q >>> proof_req_json: %s, x_queries_json: %s'),\n proof_req_json,\n x_queries_json)\n\n if not self.wallet.handle:\n LOGGER.debug('HolderProver.get_cred_briefs_by_proof_req_q <!< Wallet %s is closed', self.name)\n raise WalletState('Wallet {} is closed'.format(self.name))\n\n def _pred_filter(brief):\n nonlocal pred_refts\n for attr, preds in pred_refts.get(brief['cred_info']['cred_def_id'], {}).items():\n if any(Predicate.get(p[0]).value.no(brief['cred_info']['attrs'][attr], p[1]) for p in preds.values()):\n return False\n return True\n\n rv = {}\n item_refts = set()\n x_queries = json.loads(x_queries_json or '{}')\n for k in x_queries:\n x_queries[k] = canon_cred_wql(x_queries[k]) # indy-sdk requires attr name canonicalization\n item_refts.add(k)\n\n proof_req = json.loads(proof_req_json)\n item_refts.update(uuid for uuid in proof_req['requested_predicates'])\n if not x_queries:\n item_refts.update(uuid for uuid in proof_req['requested_attributes']) # get all req attrs if no extra wql\n handle = await anoncreds.prover_search_credentials_for_proof_req(\n self.wallet.handle,\n proof_req_json,\n json.dumps(x_queries) if x_queries else None)\n pred_refts = proof_req_pred_referents(proof_req)\n\n try:\n for item_referent in item_refts:\n count = Wallet.DEFAULT_CHUNK\n while count == Wallet.DEFAULT_CHUNK:\n fetched = json.loads(await anoncreds.prover_fetch_credentials_for_proof_req(\n handle,\n item_referent,\n Wallet.DEFAULT_CHUNK))\n count = len(fetched)\n for brief in fetched: # apply predicates from proof req here\n if brief['cred_info']['referent'] not 
in rv and _pred_filter(brief):\n rv[brief['cred_info']['referent']] = brief\n finally:\n await anoncreds.prover_close_credentials_search_for_proof_req(handle)\n\n rv_json = json.dumps(rv)\n LOGGER.debug('HolderProver.get_cred_briefs_by_proof_req_q <<< %s', rv_json)\n return rv_json" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Request a proof from a holder connection
async def request_proof(self, connection_id: str, proof_req: messages.ProofRequest, cred_ids: set = None, params: dict = None) -> messages.ConstructedProof: return await self._fetch( messages.RequestProofReq(connection_id, proof_req, cred_ids, params), messages.VerifiedProof)
[ "def test_state_proof_checked_in_client_request(looper, txnPoolNodeSet,\n client1, wallet1):\n request = sendRandomRequest(wallet1, client1)\n responseTimeout = waits.expectedTransactionExecutionTime(nodeCount)\n looper.run(\n eventually(check_proved_reply_received,\n client1, request.identifier, request.reqId,\n retryWait=1, timeout=responseTimeout))\n checkResponseCorrectnessFromNodes(client1.inBox, request.reqId, F)", "async def create_proof(self, proof_req: dict, briefs: Union[dict, Sequence[dict]], requested_creds: dict) -> str:\n\n LOGGER.debug(\n 'HolderProver.create_proof >>> proof_req: %s, briefs: %s, requested_creds: %s',\n proof_req,\n briefs,\n requested_creds)\n\n if not self.wallet.handle:\n LOGGER.debug('HolderProver.create_proof <!< Wallet %s is closed', self.name)\n raise WalletState('Wallet {} is closed'.format(self.name))\n\n label = await self._assert_link_secret('create_proof')\n\n cd_ids = set()\n x_cd_ids = set()\n for brief in iter_briefs(briefs):\n cd_id = brief['cred_info']['cred_def_id']\n if cd_id in cd_ids and cd_id not in x_cd_ids:\n x_cd_ids.add(cd_id)\n cd_ids.add(cd_id)\n if x_cd_ids:\n LOGGER.debug('HolderProver.create_proof <!< briefs specification out of focus (non-uniqueness)')\n raise CredentialFocus('Briefs list repeats cred defs: {}'.format(x_cd_ids))\n\n s_id2schema = {} # schema identifier to schema\n cd_id2cred_def = {} # credential definition identifier to credential definition\n rr_id2timestamp = {} # revocation registry of interest to timestamp of interest (or None)\n rr_id2cr_id = {} # revocation registry of interest to credential revocation identifier\n for brief in iter_briefs(briefs):\n interval = brief.get('interval', None)\n cred_info = brief['cred_info']\n s_id = cred_info['schema_id']\n if not ok_schema_id(s_id):\n LOGGER.debug('HolderProver.create_proof <!< Bad schema id %s', s_id)\n raise BadIdentifier('Bad schema id {}'.format(s_id))\n\n if s_id not in s_id2schema:\n schema = json.loads(await self.get_schema(s_id)) # add to cache en passant\n if not schema:\n LOGGER.debug(\n 'HolderProver.create_proof <!< absent schema %s, proof req may be for another ledger',\n s_id)\n raise AbsentSchema('Absent schema {}, proof req may be for another ledger'.format(s_id))\n s_id2schema[s_id] = schema\n\n cd_id = cred_info['cred_def_id']\n if not ok_cred_def_id(cd_id):\n LOGGER.debug('HolderProver.create_proof <!< Bad cred def id %s', cd_id)\n raise BadIdentifier('Bad cred def id {}'.format(cd_id))\n\n if cd_id not in cd_id2cred_def:\n cred_def = json.loads(await self.get_cred_def(cd_id)) # add to cache en passant\n cd_id2cred_def[cd_id] = cred_def\n\n rr_id = cred_info['rev_reg_id']\n if rr_id:\n if not ok_rev_reg_id(rr_id):\n LOGGER.debug('HolderProver.create_proof <!< Bad rev reg id %s', rr_id)\n raise BadIdentifier('Bad rev reg id {}'.format(rr_id))\n\n await self._sync_revoc_for_proof(rr_id) # link tails file to its rr_id if it's new\n if interval:\n if rr_id not in rr_id2timestamp:\n if interval['to'] > int(time()):\n LOGGER.debug(\n 'HolderProver.create_proof <!< interval to %s for rev reg %s is in the future',\n interval['to'],\n rr_id)\n raise BadRevStateTime(\n 'Revocation registry {} timestamp {} is in the future'.format(rr_id, interval['to']))\n rr_id2timestamp[rr_id] = interval['to']\n elif 'revocation' in cd_id2cred_def[cd_id]['value']:\n LOGGER.debug(\n 'HolderProver.create_proof <!< brief on cred def id %s missing non-revocation interval',\n cd_id)\n raise AbsentInterval('Brief on cred def id {} missing non-revocation 
interval'.format(cd_id))\n if rr_id in rr_id2cr_id:\n continue\n rr_id2cr_id[rr_id] = cred_info['cred_rev_id']\n\n rr_id2rev_state = {} # revocation registry identifier to its state\n with REVO_CACHE.lock:\n for rr_id in rr_id2timestamp:\n revo_cache_entry = REVO_CACHE.get(rr_id, None)\n tails = revo_cache_entry.tails if revo_cache_entry else None\n if tails is None: # missing tails file\n LOGGER.debug('HolderProver.create_proof <!< missing tails file for rev reg id %s', rr_id)\n raise AbsentTails('Missing tails file for rev reg id {}'.format(rr_id))\n rr_def_json = await self.get_rev_reg_def(rr_id)\n (rr_delta_json, ledger_timestamp) = await revo_cache_entry.get_delta_json(\n self._build_rr_delta_json,\n rr_id2timestamp[rr_id],\n rr_id2timestamp[rr_id])\n rr_state_json = await anoncreds.create_revocation_state(\n tails.reader_handle,\n rr_def_json,\n rr_delta_json,\n ledger_timestamp,\n rr_id2cr_id[rr_id])\n rr_id2rev_state[rr_id] = {\n rr_id2timestamp[rr_id]: json.loads(rr_state_json)\n }\n\n rv = await anoncreds.prover_create_proof(\n self.wallet.handle,\n json.dumps(proof_req),\n json.dumps(requested_creds),\n label,\n json.dumps(s_id2schema),\n json.dumps(cd_id2cred_def),\n json.dumps(rr_id2rev_state))\n LOGGER.debug('HolderProver.create_proof <<< %s', rv)\n return rv", "async def construct_proof(self, holder_id: str, proof_req: dict,\n wql_filters: dict = None,\n cred_ids: set = None) -> messages.ConstructedProof:\n return await self._fetch(\n messages.ConstructProofReq(\n holder_id,\n messages.ProofRequest(proof_req, wql_filters), cred_ids),\n messages.ConstructedProof)", "def api_proof(api_key, request_id):\r\n\twith db.connect() as connection:\r\n\t\tdata_record = db.get_data_record_by_request_id(connection, request_id)\r\n\t\tproof_available = True if int((datetime.datetime.now() - data_record[\"timestamp\"]).total_seconds()) > int(os.getenv(\"RIGIDBIT_PROOF_DELAY\")) else False\r\n\t\tif proof_available:\r\n\t\t\theaders = {\"api_key\": os.getenv(\"RIGIDBIT_API_KEY\")}\r\n\t\t\turl = os.getenv(\"RIGIDBIT_BASE_URL\") + \"/api/trace-block/\" + str(data_record[\"block_id\"])\r\n\t\t\tcontent = requests.get(url, headers=headers).content\r\n\t\t\treturn Response(content, mimetype=\"application/json\", headers={\"Content-disposition\": f\"attachment; filename={request_id}.json\"})\r\n\t\telse:\r\n\t\t\treturn ({\"error\": \"Request ID is valid, but proof is not yet available.\"}, 202)", "async def generate_proof_request(self, spec_id: str) -> messages.ProofRequest:\n return await self._fetch(\n messages.GenerateProofRequestReq(spec_id),\n messages.ProofRequest)", "async def verify_proof(self, proof_req: dict, proof: dict) -> str:\n\n logger = logging.getLogger(__name__)\n logger.debug('Verifier.verify_proof: >>> proof_req: {}, proof: {}'.format(\n proof_req,\n proof))\n\n claims = proof['identifiers']\n uuid2schema = {}\n uuid2claim_def = {}\n for claim_uuid in claims:\n claim_s_key = schema_key_for(claims[claim_uuid]['schema_key'])\n schema = json.loads(await self.get_schema(claim_s_key))\n uuid2schema[claim_uuid] = schema\n uuid2claim_def[claim_uuid] = json.loads(await self.get_claim_def(\n schema['seqNo'],\n claims[claim_uuid]['issuer_did']))\n\n rv = json.dumps(await anoncreds.verifier_verify_proof(\n json.dumps(proof_req),\n json.dumps(proof),\n json.dumps(uuid2schema),\n json.dumps(uuid2claim_def),\n json.dumps({}))) # revoc_regs_json\n\n logger.debug('Verifier.verify_proof: <<< {}'.format(rv))\n return rv", "def test_respond_to_enquire_link_explicit(self):\n fake_smsc = 
FakeSMSC()\n client = self.successResultOf(self.connect(fake_smsc))\n self.assertEqual(client.received, b\"\")\n\n rtel_d = fake_smsc.respond_to_enquire_link(EnquireLink(2).obj)\n yield wait0()\n # enquire_link response received.\n self.successResultOf(rtel_d)\n self.assertEqual(client.received, EnquireLinkResp(2).get_bin())", "def genproof(publickey, data, authenticators, challenge):\n pass", "def verify_non_interactive_proof_showing_protocol(proof,pk,right_side_commit,disclosed_attributes, random_signature, message):\n nb_attr = int((len(pk) - 3) / 2)\n\n R = jsonpickle.decode(proof[0])\n sm = proof[1]\n st = jsonpickle.decode(proof[2])\n random_signature = (jsonpickle.decode(random_signature[0]),jsonpickle.decode(random_signature[1]))\n right_side_commit = jsonpickle.decode(right_side_commit)\n\n #computing challenge from all public info: public key, commitment and R, as well as message m\n #doing SHA256 hash of the concat binary of the public info\n challenge = right_side_commit.to_binary() + R.to_binary() + message\n for i in range(0,len(pk)):\n challenge = challenge + jsonpickle.decode(pk[i]).to_binary()\n challenge = hashlib.sha256(challenge).digest()\n #convert challenge to Bn\n challenge = Bn.from_binary(challenge)\n\n verif = right_side_commit.pow(challenge)\n for i in sm:\n verif = verif * ((random_signature[0].pair(jsonpickle.decode(pk[3 + nb_attr + i]))).pow(jsonpickle.decode(sm[i])))\n verif = verif * (random_signature[0].pair(jsonpickle.decode(pk[1 + nb_attr]))).pow(st)\n\n #need to compute left side to check if it's equal to right side commitment using the bilinear function:\n left_side = random_signature[1].pair(jsonpickle.decode(pk[1 + nb_attr]))\n for i in disclosed_attributes:\n left_side = left_side * ((random_signature[0].pair(jsonpickle.decode(pk[3 + nb_attr + i]))).pow(-Bn.from_binary(disclosed_attributes[i].encode())))\n left_side = left_side / (random_signature[0].pair(jsonpickle.decode(pk[2 + nb_attr])))\n\n #check if verif == R and if left_side == right_side_commitment\n return ((R == verif) and (left_side == right_side_commit))", "def web_proof(request_id):\r\n\twith db.connect() as connection:\r\n\t\tdata_record = db.get_data_record_by_request_id(connection, request_id)\r\n\t\tif data_record != None:\r\n\t\t\tproof_available = True if int((datetime.datetime.now() - data_record[\"timestamp\"]).total_seconds()) > int(os.getenv(\"RIGIDBIT_PROOF_DELAY\")) else False\r\n\t\t\tif proof_available:\r\n\t\t\t\theaders = {\"api_key\": os.getenv(\"RIGIDBIT_API_KEY\")}\r\n\t\t\t\turl = os.getenv(\"RIGIDBIT_BASE_URL\") + \"/api/trace-block/\" + str(data_record[\"block_id\"])\r\n\t\t\t\tcontent = requests.get(url, headers=headers).content\r\n\t\t\t\treturn Response(content, mimetype=\"application/json\", headers={\"Content-disposition\": f\"attachment; filename={request_id}.json\"})\r\n\t\treturn render_template(\"error.html\", page_title=misc.page_title(\"404\"), data={\"header\": \"404\", \"error\": f\"\"\"Request ID not found: {request_id}\"\"\"}), 404", "def test_respond_to_enquire_link(self):\n fake_smsc = FakeSMSC()\n client = self.successResultOf(self.connect(fake_smsc))\n self.assertEqual(client.received, b\"\")\n\n rtel_d = fake_smsc.respond_to_enquire_link()\n yield client.write(EnquireLink(2).get_bin())\n # enquire_link response received.\n self.assertNoResult(rtel_d)\n self.assertEqual(client.received, EnquireLinkResp(2).get_bin())\n\n yield wait0()\n self.successResultOf(rtel_d)", "def _check_proof_vs_proposal():\n proof_req = 
pres_ex_record.pres_request.attachment(\n IndyPresExchangeHandler.format\n )\n\n # revealed attrs\n for reft, attr_spec in proof[\"requested_proof\"][\"revealed_attrs\"].items():\n proof_req_attr_spec = proof_req[\"requested_attributes\"].get(reft)\n if not proof_req_attr_spec:\n raise V20PresFormatHandlerError(\n f\"Presentation referent {reft} not in proposal request\"\n )\n req_restrictions = proof_req_attr_spec.get(\"restrictions\", {})\n\n name = proof_req_attr_spec[\"name\"]\n proof_value = attr_spec[\"raw\"]\n sub_proof_index = attr_spec[\"sub_proof_index\"]\n schema_id = proof[\"identifiers\"][sub_proof_index][\"schema_id\"]\n cred_def_id = proof[\"identifiers\"][sub_proof_index][\"cred_def_id\"]\n criteria = {\n \"schema_id\": schema_id,\n \"schema_issuer_did\": schema_id.split(\":\")[-4],\n \"schema_name\": schema_id.split(\":\")[-2],\n \"schema_version\": schema_id.split(\":\")[-1],\n \"cred_def_id\": cred_def_id,\n \"issuer_did\": cred_def_id.split(\":\")[-5],\n f\"attr::{name}::value\": proof_value,\n }\n\n if (\n not any(r.items() <= criteria.items() for r in req_restrictions)\n and len(req_restrictions) != 0\n ):\n raise V20PresFormatHandlerError(\n f\"Presented attribute {reft} does not satisfy proof request \"\n f\"restrictions {req_restrictions}\"\n )\n\n # revealed attr groups\n for reft, attr_spec in (\n proof[\"requested_proof\"].get(\"revealed_attr_groups\", {}).items()\n ):\n proof_req_attr_spec = proof_req[\"requested_attributes\"].get(reft)\n if not proof_req_attr_spec:\n raise V20PresFormatHandlerError(\n f\"Presentation referent {reft} not in proposal request\"\n )\n req_restrictions = proof_req_attr_spec.get(\"restrictions\", {})\n proof_values = {\n name: values[\"raw\"] for name, values in attr_spec[\"values\"].items()\n }\n sub_proof_index = attr_spec[\"sub_proof_index\"]\n schema_id = proof[\"identifiers\"][sub_proof_index][\"schema_id\"]\n cred_def_id = proof[\"identifiers\"][sub_proof_index][\"cred_def_id\"]\n criteria = {\n \"schema_id\": schema_id,\n \"schema_issuer_did\": schema_id.split(\":\")[-4],\n \"schema_name\": schema_id.split(\":\")[-2],\n \"schema_version\": schema_id.split(\":\")[-1],\n \"cred_def_id\": cred_def_id,\n \"issuer_did\": cred_def_id.split(\":\")[-5],\n **{\n f\"attr::{name}::value\": value\n for name, value in proof_values.items()\n },\n }\n\n if (\n not any(r.items() <= criteria.items() for r in req_restrictions)\n and len(req_restrictions) != 0\n ):\n raise V20PresFormatHandlerError(\n f\"Presented attr group {reft} does not satisfy proof request \"\n f\"restrictions {req_restrictions}\"\n )\n\n # predicate bounds\n for reft, pred_spec in proof[\"requested_proof\"][\"predicates\"].items():\n proof_req_pred_spec = proof_req[\"requested_predicates\"].get(reft)\n if not proof_req_pred_spec:\n raise V20PresFormatHandlerError(\n f\"Presentation referent {reft} not in proposal request\"\n )\n req_name = proof_req_pred_spec[\"name\"]\n req_pred = Predicate.get(proof_req_pred_spec[\"p_type\"])\n req_value = proof_req_pred_spec[\"p_value\"]\n req_restrictions = proof_req_pred_spec.get(\"restrictions\", {})\n for req_restriction in req_restrictions:\n for k in list(req_restriction): # cannot modify en passant\n if k.startswith(\"attr::\"):\n req_restriction.pop(k) # let indy-sdk reject mismatch here\n sub_proof_index = pred_spec[\"sub_proof_index\"]\n for ge_proof in proof[\"proof\"][\"proofs\"][sub_proof_index][\n \"primary_proof\"\n ][\"ge_proofs\"]:\n proof_pred_spec = ge_proof[\"predicate\"]\n if proof_pred_spec[\"attr_name\"] != 
canon(req_name):\n continue\n if not (\n Predicate.get(proof_pred_spec[\"p_type\"]) is req_pred\n and proof_pred_spec[\"value\"] == req_value\n ):\n raise V20PresFormatHandlerError(\n f\"Presentation predicate on {req_name} \"\n \"mismatches proposal request\"\n )\n break\n else:\n raise V20PresFormatHandlerError(\n f\"Proposed request predicate on {req_name} not in presentation\"\n )\n\n schema_id = proof[\"identifiers\"][sub_proof_index][\"schema_id\"]\n cred_def_id = proof[\"identifiers\"][sub_proof_index][\"cred_def_id\"]\n criteria = {\n \"schema_id\": schema_id,\n \"schema_issuer_did\": schema_id.split(\":\")[-4],\n \"schema_name\": schema_id.split(\":\")[-2],\n \"schema_version\": schema_id.split(\":\")[-1],\n \"cred_def_id\": cred_def_id,\n \"issuer_did\": cred_def_id.split(\":\")[-5],\n }\n\n if (\n not any(r.items() <= criteria.items() for r in req_restrictions)\n and len(req_restrictions) != 0\n ):\n raise V20PresFormatHandlerError(\n f\"Presented predicate {reft} does not satisfy proof request \"\n f\"restrictions {req_restrictions}\"\n )", "def acquire_card_request(context):\n game_id = int(context.table.rows[0]['game id'])\n player_id = int(context.table.rows[0]['player id'])\n card_id = int(context.table.rows[0]['card id'])\n\n _, result = context.clients.card_broker.cardOperations.acquire_card(\n acquireCardRequest={\n 'playerId': player_id,\n 'gameId': game_id,\n 'cardId': card_id\n }\n ).result()\n\n assert_that(result.status_code, equal_to(200))", "async def get_cred_briefs_by_proof_req_q(self, proof_req_json: str, x_queries_json: str = None) -> str:\n\n LOGGER.debug(\n ('HolderProver.get_cred_briefs_by_proof_req_q >>> proof_req_json: %s, x_queries_json: %s'),\n proof_req_json,\n x_queries_json)\n\n if not self.wallet.handle:\n LOGGER.debug('HolderProver.get_cred_briefs_by_proof_req_q <!< Wallet %s is closed', self.name)\n raise WalletState('Wallet {} is closed'.format(self.name))\n\n def _pred_filter(brief):\n nonlocal pred_refts\n for attr, preds in pred_refts.get(brief['cred_info']['cred_def_id'], {}).items():\n if any(Predicate.get(p[0]).value.no(brief['cred_info']['attrs'][attr], p[1]) for p in preds.values()):\n return False\n return True\n\n rv = {}\n item_refts = set()\n x_queries = json.loads(x_queries_json or '{}')\n for k in x_queries:\n x_queries[k] = canon_cred_wql(x_queries[k]) # indy-sdk requires attr name canonicalization\n item_refts.add(k)\n\n proof_req = json.loads(proof_req_json)\n item_refts.update(uuid for uuid in proof_req['requested_predicates'])\n if not x_queries:\n item_refts.update(uuid for uuid in proof_req['requested_attributes']) # get all req attrs if no extra wql\n handle = await anoncreds.prover_search_credentials_for_proof_req(\n self.wallet.handle,\n proof_req_json,\n json.dumps(x_queries) if x_queries else None)\n pred_refts = proof_req_pred_referents(proof_req)\n\n try:\n for item_referent in item_refts:\n count = Wallet.DEFAULT_CHUNK\n while count == Wallet.DEFAULT_CHUNK:\n fetched = json.loads(await anoncreds.prover_fetch_credentials_for_proof_req(\n handle,\n item_referent,\n Wallet.DEFAULT_CHUNK))\n count = len(fetched)\n for brief in fetched: # apply predicates from proof req here\n if brief['cred_info']['referent'] not in rv and _pred_filter(brief):\n rv[brief['cred_info']['referent']] = brief\n finally:\n await anoncreds.prover_close_credentials_search_for_proof_req(handle)\n\n rv_json = json.dumps(rv)\n LOGGER.debug('HolderProver.get_cred_briefs_by_proof_req_q <<< %s', rv_json)\n return rv_json", "def 
initiate(address='127.0.0.1', port=9050, versions=[4, 5]):\n\n # Setup context\n peer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ctxt = ssl.SSLContext(ssl.PROTOCOL_TLS)\n\n # https://trac.torproject.org/projects/tor/ticket/28616\n ctxt.options |= ssl.OP_NO_TLSv1_3\n\n # Establish connection\n peer = ctxt.wrap_socket(peer)\n peer.connect((address, port))\n\n # VERSIONS handshake\n version = negotiate_version(peer, versions, as_initiator=True)\n\n # Wraps with socket.io\n peer = lnn.socket.io(peer)\n\n # Get CERTS, AUTH_CHALLENGE and NETINFO cells afterwards\n certs_cell = lnn.cell.certs.cell(peer.recv())\n auth_cell = lnn.cell.challenge.cell(peer.recv())\n netinfo_cell = lnn.cell.netinfo.cell(peer.recv())\n\n # Sanity checks\n if not certs_cell.valid:\n raise RuntimeError('Invalid CERTS cell: {}'.format(certs_cell.raw))\n if not auth_cell.valid:\n raise RuntimeError('Invalid AUTH_CHALLENGE cell:{}'.format(\n auth_cell.raw))\n if not netinfo_cell.valid:\n raise RuntimeError('Invalid NETINFO cell: {}'.format(netinfo_cell.raw))\n\n # Send our NETINFO to say \"we don't want to authenticate\"\n peer.send(lnn.cell.netinfo.pack(address))\n return link(peer, version)", "def handle_request(sock):\n aphorism = recv_until(sock, b'?')\n answer = get_answer(aphorism)\n sock.sendall(answer)", "def verify_disclosure_proof(\n pk: PublicKey,\n disclosure_proof: DisclosureProof,\n message: bytes\n ) -> bool:\n right_side_commit = disclosure_proof[0]\n random_signature = disclosure_proof[1]\n disclosed_attr_index_dic = disclosure_proof[2]\n proof = disclosure_proof[3]\n\n proof_verif = verify_non_interactive_proof_showing_protocol(proof,pk,right_side_commit,disclosed_attr_index_dic, random_signature, message)\n neutral_verif = jsonpickle.decode(random_signature[0]).is_neutral_element()\n\n return proof_verif and not neutral_verif", "def Grant(self, request, ssl_cert=None, ssl_key=None):\n pass", "def test_handshake(self):\n cli, svr, p = connectedServerAndClient(\n ServerClass=SecurableProto,\n ClientClass=SecurableProto)\n\n okc = OKCert()\n svr.certFactory = lambda : okc\n\n cli.callRemote(\n amp.StartTLS, tls_localCertificate=okc,\n tls_verifyAuthorities=[PretendRemoteCertificateAuthority()])\n\n # let's buffer something to be delivered securely\n L = []\n cli.callRemote(SecuredPing).addCallback(L.append)\n p.flush()\n # once for client once for server\n self.assertEqual(okc.verifyCount, 2)\n L = []\n cli.callRemote(SecuredPing).addCallback(L.append)\n p.flush()\n self.assertEqual(L[0], {'pinged': True})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
creates a directory for images to be stored
def create_image_directory(): if not os.path.exists("Images"): os.makedirs("Images")
[ "def createImages():\n if not isdir(\"images\"):\n mkdir(\"images\")", "def make_image_directory():\n directory = '/Users/jon/PycharmProjects/Cleveland_VAE/data/images/' + hyperparameter_string\n if not os.path.exists(directory):\n os.makedirs(directory)", "def get_image_dir():\n now = datetime.datetime.now()\n image_dir = base_dir() + \"/images/\" + datetime.datetime.strftime(now, '%Y%m%d%H%M%S')\n if not os.path.exists(image_dir):\n os.makedirs(image_dir)\n return image_dir", "def create_folders(self):\n if not os.path.exists(self.data_folder):\n os.mkdir(self.data_folder)\n if not os.path.exists(self.weigths_folder):\n os.mkdir(self.weigths_folder)\n if not os.path.exists(self.predict_folder):\n os.mkdir(self.predict_folder)\n if not os.path.exists(self.big_image_folder):\n os.mkdir(self.big_image_folder)\n if not os.path.exists(self.prediction_product_path):\n os.mkdir(self.prediction_product_path)", "def create_directories():\n\n # create download directory if doesn't exist\n if not os.path.exists(IOP_DOWNLOAD_DIR):\n os.makedirs(IOP_DOWNLOAD_DIR)\n\n # create unpack directory if doesn't exist\n if not os.path.exists(IOP_UNPACK_FOLDER):\n os.makedirs(IOP_UNPACK_FOLDER)", "def create_files_dir(self):\n raw_data_dir = self.data_path / \"raw\"\n raw_data_dir.mkdir(exist_ok=True)\n\n processed_data_dir = self.data_path / \"processed\"\n processed_data_dir.mkdir(exist_ok=True)\n\n self.files_dir = self.data_path / \"raw\" / self.author_id\n\n print(\"Author's directory:\", self.files_dir.absolute())\n\n self.files_dir.mkdir(exist_ok=True)", "def _prepare_output_path(self):\n\n self._image_dir = os.path.join(self._output_dir, 'images')\n self._annotation_dir = os.path.join(self._output_dir, 'annotations')\n self._resized_dir = os.path.join(self._output_dir, 'resized')\n\n if not os.path.exists(self._output_dir):\n os.makedirs(self._output_dir)\n\n if not os.path.exists(self._image_dir):\n os.makedirs(self._image_dir)\n\n if not os.path.exists(self._annotation_dir):\n os.makedirs(self._annotation_dir)\n\n if not os.path.exists(self._resized_dir):\n os.makedirs(self._resized_dir)", "def create_directories():\n if not os.path.exists(DATA_DIRECTORY):\n os.makedirs(DATA_DIRECTORY)\n if not os.path.exists(OUTPUT_DIRECTORY):\n os.makedirs(OUTPUT_DIRECTORY)", "def create_directory_structure():\n if not os.path.exists(APP_SAVE_DIR):\n os.mkdir(APP_SAVE_DIR)\n\n if not os.path.exists(APP_TEMP_DIR):\n os.mkdir(APP_TEMP_DIR)", "def create_dir(savedir):\n if not os.path.isdir(savedir):\n print(\"dir {} does not exist, creating\".format(savedir))\n os.mkdir(savedir)", "def create_directory():\r\n\r\n # Create directory for all lyrics\r\n try:\r\n os.mkdir(lyricDirectory)\r\n except FileExistsError:\r\n pass\r\n\r\n # Create directory for specific billboard chart\r\n try:\r\n os.mkdir(lyricDirectory + \"/\" + chartSwitcher())\r\n except FileExistsError:\r\n pass", "def add_directory(self, local_dir):\n self.images.add_directory(os.path.abspath(local_dir))", "def _create_dir(self):\n self.out_fp = str(self.pb.wd + \n 'out_'+str(self.pb.conf_num) + '/')\n if not os.path.exists(self.out_fp):\n os.makedirs(self.out_fp)", "def init():\n main_backup_dir = '.wit'\n parent_dir = os.getcwd()\n new_dir = pathlib.Path() / parent_dir / main_backup_dir / 'images' #Changed syntax according to notes on submission\n new_dir.mkdir(parents=True, exist_ok=True)\n new_dir = pathlib.Path() / parent_dir / main_backup_dir / 'staging_area'\n new_dir.mkdir(parents=True, exist_ok=True)", "def directory_maker(dir_list):\n for 
path in dir_list:\n if not os.path.exists(path):\n os.mkdir(path)\n else:\n continue", "def createFolders():\n locations, verticals = readFiles()\n for vert in verticals:\n os.makedirs(\"./files/%s\" % vert, exist_ok=True)", "def create_folders_if_need_to(self):\n expected = [\n self.path('images'),\n self.path('meta'),\n self.path('thumbnails'),\n self.path('staging'),\n self.path('staging', 'images'),\n self.path('staging', 'meta'),\n self.path('staging', 'thumbnails'),\n self.path('staging', 'unconfirmed'),\n ]\n for each in expected:\n if not self.exists(each):\n self.mkdir(each)\n print(f'New folder created: {each}')", "def make_image_list(image_dir):", "def make_experiment_directory(self):\n self.save_dir = os.path.join(\n \"results\", self.name,\n self.config.embedding_strategy, self.start_time)\n\n # Prepend 'debug' when in debug mode\n if self.config.debug:\n self.save_dir = os.path.join(\"debug\", self.save_dir)\n\n # Create directory\n os.makedirs(self.save_dir)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get summaries from all Wikipedia pages with a title in titles
def get_summaries(titles, title_data): length = len(titles) index = 0 while index < length: multi_title = sanatize_url(titles[index]) for _ in range(20): # Collect 20 titles at a time if index < length: multi_title += '|' + sanatize_url(titles[index]) else: break index += 1 progress_update(index, length) wiki_request = requests.get(WIKI_API_URL+SUMMARIES_SEARCH_VARS+multi_title) root = ET.fromstring(wiki_request.content) # get 20 summaries pages = root.findall('query/pages/page') # find all pages for page in pages: # Add summaries to dict title_data[page.attrib['title']].append(page.find('extract').text)
[ "def get_wiki_summary(request):\n while 1:\n data = requests.get(\n f'https://en.wikipedia.org/w/api.php?action=query&titles={request}&prop=extracts&exintro=&exsentences=10&format=json&redirects'\n ).json()\n if '-1' in data['query']['pages']:\n if ',' not in request:\n return 'No information found.'\n request = re.sub(r',[^,]*?$', '', request)\n else:\n for item in data['query']['pages']:\n info = data['query']['pages'][item]['extract']\n info = re.sub(r' \\(<span>.*?</span>\\)', '', info)\n info = re.sub(r' <span>\\(.*?\\)</span>', '', info)\n info = re.sub(r' \\(.*?(lang|title).*?</span>\\)', '', info)\n return info\n return data", "def GetTitles(title,verbose=True):\r\n if verbose:\r\n try:\r\n print(title)\r\n except:\r\n print(\"Warning: 'gbk' can not encode unicode characters\")\r\n try:\r\n page = wikipedia.page(title)\r\n return page.links\r\n except:\r\n return []", "def get_titles_on_page(page):\n payload = {\n 'action': 'query',\n 'prop': 'links',\n 'pllimit': '500',\n 'format': 'json',\n 'titles': page\n }\n headers = {\n 'User-agent': 'holberton 0.1'\n }\n url = \"https://en.wikipedia.org/w/api.php\"\n r = requests.get(url, headers=headers, params=payload)\n pages = r.json().get(\"query\").get(\"pages\")\n for v in pages.values():\n link_val = v\n links = link_val.get(\"links\")\n if links is None:\n return(set())\n titles = set()\n for link in links:\n titles.add(link.get('title'))\n return (titles)", "def GetUrls(titles):\r\n links = []\r\n for title in titles:\r\n page = wikipedia.page(title)\r\n links.append(page.url)\r\n return links", "def get_titles_linked_to_page(page):\n payload = {\n 'action': 'query',\n 'prop': 'linkshere',\n 'lhlimit': '500',\n 'format': 'json',\n 'titles': page\n }\n headers = {\n 'User-agent': 'holberton 0.1'\n }\n url = \"https://en.wikipedia.org/w/api.php\"\n r = requests.get(url, headers=headers, params=payload)\n pages = r.json().get(\"query\").get(\"pages\")\n for v in pages.values():\n link_val = v\n links = link_val.get(\"linkshere\")\n if links is None:\n return(set())\n titles = set()\n for link in links:\n titles.add(link.get('title'))\n return (titles)", "def getPage(self,title):\n return wikipedia.page(title)", "def extract(title, sentence_count):\n api_url = 'https://de.wikipedia.org/w/api.php?action=query&format=json&titles=' + \\\n title + '&prop=extracts&exintro=true&exsentences=' + \\\n str(sentence_count) # + '&explaintext=true&exsectionformat=plain'\n j = requests.get(api_url).json()\n extract = list(j['query']['pages'].values())[0]['extract']\n if '\\n' in extract:\n extract = extract.replace('\\n', ' ')\n return extract", "def get_wikipedia_article( article_title ):\n failed = True\n while failed:\n failed = False\n try:\n req = urllib2.Request('http://en.wikipedia.org/w/index.php?title=Special:Export/%s&action=submit' \\\n % (article_title),\n None, { 'User-Agent' : 'x'})\n f = urllib2.urlopen(req)\n all = f.read()\n except (urllib2.HTTPError, urllib2.URLError):\n print 'oops. there was a failure downloading %s. retrying...' \\\n % article_title\n failed = True\n continue\n print 'downloaded %s. parsing...' 
% article_title\n \n try:\n all = re.search(r'<text.*?>(.*)</text', all, flags=re.DOTALL).group(1)\n all = remove_braces( all )\n #remove references\n all = re.sub(r'&lt;','<',all)\n all = re.sub(r'&gt;','>',all)\n all = re.sub(r'<ref>([\\s\\S]*?)(</ref>?)', '', all)\n all = re.sub(r'<ref name=(.*?)/>', '', all)\n all = re.sub(r'<ref name=(.*?)>([\\s\\S]*?)</ref>', '', all)\n #remove comments\n all = re.sub(r'<!--(.*?)-->', '', all)\n #formatting and double quotes\n \n all = re.sub(r'\\n', ' ', all)\n all = re.sub(r\"''\", '', all)\n #replace &amp;nbsp; with a space\n all = re.sub(r'&amp;nbsp;', ' ', all)\n #all = re.sub(r'\\{\\{.*?\\}\\}', r'', all)\n all = re.sub(r'\\[\\[Category:.*', '', all)\n all = re.sub(r'==\\s*[Ss]ource\\s*==.*', '', all)\n all = re.sub(r'==\\s*[Rr]eferences\\s*==.*', '', all)\n all = re.sub(r'==\\s*[Ee]xternal [Ll]inks\\s*==.*', '', all)\n all = re.sub(r'==\\s*[Ee]xternal [Ll]inks and [Rr]eferences==\\s*', '', all)\n all = re.sub(r'==\\s*[Ss]ee [Aa]lso\\s*==.*', '', all)\n all = re.sub(r'http://[^\\s]*', '', all)\n all = re.sub(r'\\[\\[Image:.*?\\]\\]', '', all)\n all = re.sub(r'Image:.*?\\|', '', all)\n all = re.sub(r'&quot;', '', all)\n all = remove_brakets_file(all)\n #all = re.sub(r'\\[\\[File:.*?\\]\\]', '', all)\n all = replace_links_with_text(all)\n all = re.sub(r'={2,}','',all)\n all = re.sub(r\"'\",'',all)\n all = re.sub(r'\\{\\|(.*?)\\|\\}',\"\",all)\n #all = re.sub(r'\\[\\[.*?\\|*([^\\|]*?)\\]\\]', r'\\1', all)\n #all = re.sub(r'\\&lt;.*?&gt;', '', all)\n all = filter(lambda x: x in string.printable, all)\n except:\n # Something went wrong, try again. (This is bad coding practice.)\n print 'oops. there was a failure parsing %s. retrying...' \\\n % article_title\n failed = True\n continue\n return(all)", "def get_wiki_summary(name):\n url = \"http://www.mantidproject.org/index.php?title=%s&action=raw&section=1\" % name\n \n webFile = urllib.urlopen(url)\n wiki = webFile.read()\n webFile.close()\n out = \"\"\n wikified = \"\"\n for line in wiki.split(\"\\n\"):\n line = line.strip()\n if (line != \"== Summary ==\") and (line != \"==Summary==\"):\n # Keep all markup in here\n wikified += line + \" \"\n # And strip it out for this one\n out += de_wikify(line) + \" \"\n \n if line.startswith(\"{{Binary\"):\n # Template in binary ops. 
Skip the rest\n break\n \n out = out.strip()\n return (out, wikified)", "def title_search (self, title):\n meta = None\n timing = 0.0\n message = None\n t0 = time.time()\n try:\n query = \"query.bibliographic={}\".format(urllib.parse.quote(title))\n url = self._get_api_url(query)\n\n response = requests.get(url).text\n json_response = json.loads(response)\n\n items = json_response[\"message\"][\"items\"]\n first_item = items[0] if len(items) > 0 else {}\n titles = first_item.get(\"title\", []) \n result_title = titles[0] if len(titles) > 0 else None\n\n if self.title_match(title, result_title):\n raw_meta = first_item\n meta = dict()\n if 'title' in raw_meta:\n meta['title'] = raw_meta[\"title\"]\n else:\n meta['title'] = None\n \n if 'DOI' in raw_meta:\n meta['doi'] = raw_meta[\"DOI\"]\n else:\n meta['doi'] = None\n \n if 'container-title' in raw_meta:\n meta['journal'] = raw_meta[\"container-title\"][0]\n else:\n meta['journal'] = None\n \n if 'ISSN' in raw_meta:\n meta['issn'] = raw_meta[\"ISSN\"][0]\n else:\n meta['issn'] = None\n\n if \"published-print\" in raw_meta:\n meta['year'] = raw_meta[\"published-print\"]['date-parts'][0][0] \n else:\n meta['year'] = None\n \n if 'author' in raw_meta:\n meta['authors'] = raw_meta[\"author\"]\n else:\n meta['authors'] = None\n \n if 'URL' in raw_meta:\n meta['url'] = raw_meta[\"URL\"]\n else:\n meta['url'] = None\n # meta = raw_meta\n if self.parent.logger:\n self.parent.logger.debug(meta)\n except: \n print(traceback.format_exc())\n meta = None\n message = f\"ERROR: {title}\"\n print(message) \n \n timing = self._mark_elapsed_time(t0)\n return _ScholInfraResponse_Crossref(self, meta, timing, message)", "def return_place_info(self, title):\n url = f\"https://fr.wikipedia.org/api/rest_v1/page/summary/{title}\"\n request = requests.get(url).json()\n place_info = request[\"extract\"]\n link = request[\"content_urls\"][\"desktop\"][\"page\"]\n bot_answer = place_info + \"</br>\" + f\"<a href='{link}' style='color:white;'>[En savoir plus sur Wikipedia]</a>\"\n return bot_answer", "def get_category_titles_from_each_page(list_of_urls):\n titles = []\n\n print('Retrieving data for each category:')\n with progressbar.ProgressBar(max_value=len(list_of_urls)) as bar:\n for counter, url in enumerate(list_of_urls):\n category_page = urlopen(url)\n scrape_data = BeautifulSoup(\n category_page, \"html.parser\") # BeatifulSoup Object\n title = scrape_data.h1.text\n titles.append(title)\n bar.update(counter)\n return titles", "def search_title(self, text: str) -> dict:\n logging.info(\"Searching wikipedia for text '{}'\".format(text))\n url: str = \"/w/api.php\"\n http_params: dict = {\n \"action\": \"query\",\n \"list\": \"search\",\n \"format\": \"json\",\n \"srsearch\": text.replace(\" \", \"%20\"),\n \"srlimit\": \"1\",\n \"srprop\": \"\"\n }\n url_with_params: str = helpers.add_http_parameters(url, http_params)\n\n http_client = self._connect_http_client()\n http_client.request(\"GET\", url_with_params)\n response: bytes = http_client.getresponse().read()\n http_client.close()\n\n return json.loads(response)", "def get_knowledge(term):\n summary = wikipedia.summary(term, sentences=2)\n return summary", "def get_categories_from_title(titles, title_data):\n length = len(titles)\n index = 0\n while index < length:\n multi_title = sanatize_url(titles[index])\n for _ in range(20): # Collect 20 titles at a time\n if index < length:\n multi_title += '|' + sanatize_url(titles[index])\n else:\n break\n index += 1\n progress_update(index, length)\n wiki_request = 
requests.get(WIKI_API_URL+TITLE_CAT_SEARCH_VARS+multi_title)\n root = ET.fromstring(wiki_request.content)\n pages = root.findall('query/pages/page') # find all pages\n for page in pages: # collect and add page categories to dict\n categories = [cl.attrib['title'].split(':', 1)[1] for cl in page.findall('categories/cl')]\n title_data[page.attrib['title']].append(repr(categories))", "def get_wiki(self, totText, totArticles):\n\t\tif self.config['WIKIPEDIA']['language'] == 'fr':\n\t\t\trandomUrl = 'https://fr.wikipedia.org/wiki/Sp%C3%A9cial:Page_au_hasard'\n\t\telse:\n\t\t\trandomUrl = 'https://en.wikipedia.org/wiki/Special:Random'\n\t\trandomPageData = request.urlopen(randomUrl).read().decode('utf-8') # Get random page and extract URL\n\t\trandomPageSoup = bs(randomPageData, 'html.parser')\n\t\turl = re.findall('<link href=\"(.*)\" rel=\"canonical\"/>', str(randomPageSoup))[0]\n\t\ttry:\n\t\t\tpageData = request.urlopen(url).read().decode('utf-8') # Scrap this URL\n\t\texcept Exception as E:\n\t\t\tprint('Error while downloading data ({}).'.format(E))\n\t\ttotArticles.append(url)\n\t\tpageSoup = bs(pageData, 'html.parser')\n\t\tdata_helper = Helpers(config=self.config)\n\t\trText = data_helper.clean_html(pageSoup.find_all('p')).lower()\n\t\ttotText.append(rText)\n\t\treturn totText, totArticles", "def get_wiki_articles(output_dir):\n\tif not os.path.exists(output_dir):\n\t\tos.mkdir(output_dir)\n\tq = get_top_terms(\"newsArticleCollection\", \"persons\", 100)\n\tif q['status'] == 'Unsuccessful':\n\t\tprint \"Solr request Unsuccessful\"\n\t\treturn\n\twords_init = q.get('words')\n\twords = Set()\n\tfor w in words_init:\n\t\tw = w.replace(',', '')\n\t\twords.update(w.split())\n\twords = list(words)\n\t# words_stemmed = [word.replace(',', '') for word in words]\n\t# words = words + words_stemmed\n\tlogger.info(words)\n\ts = requests.Session()\n\turl = \"http://en.wikipedia.org/w/index.php?title=Special:Export\"\n\tpages = Set()\n\tfor word in words:\n\t\tr = s.post(url, data=dict(action=\"submit\",catname=word, addcat=True))\n\t\tdoc = html5lib.parse(r.text)\n\t\ttree = html.fromstring(r.text)\n\t\tline = [td.text for td in tree.xpath(\"//*[@id='mw-content-text']/form/textarea\")]\n\t\tpages_obtained = []\n\t\tfor l in line:\n\t\t\tif l:\n\t\t\t\tpages_obtained = l.splitlines()\n\t\t\t\tfor pp in pages_obtained:\n\t\t\t\t\tpages.add(pp)\n\t\tlogger.info(\"%s page titles obtained for %s\" % (len(pages_obtained), word))\n\tpage_params = (\"%0A\").join(list(pages))\n\tlogger.info(\"A total of %s page titles obtained, now getting pages from Wikipedia...\" % len(pages))\n\tfrom_ = \"2000-01-27T20:25:56Z\"\n\turl = \"http://en.wikipedia.org/w/index.php?title=Special:Export&pages=%s&offset=%s&limit=10000&action=submit\"\n\turl = \"http://en.wikipedia.org/wiki/Special:Export/\"\n\tfor i, page in enumerate(pages):\n\t\tr = s.get(url+page)\n\t\twith open(os.path.join(output_dir, \"wiki_%s.xml\" % i), 'w') as out:\n\t\t\tout.write(r.text.encode('utf8'))", "async def summary(self):\n return await self.wiki.http.get_summary(self.title)", "def get_gt_top_stories(webpage_text):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get every category from every title in titles
def get_categories_from_title(titles, title_data):
    length = len(titles)
    index = 0
    while index < length:
        multi_title = sanatize_url(titles[index])
        for _ in range(20):  # Collect 20 titles at a time
            if index < length:
                multi_title += '|' + sanatize_url(titles[index])
            else:
                break
            index += 1
        progress_update(index, length)
        wiki_request = requests.get(WIKI_API_URL+TITLE_CAT_SEARCH_VARS+multi_title)
        root = ET.fromstring(wiki_request.content)
        pages = root.findall('query/pages/page')  # find all pages
        for page in pages:  # collect and add page categories to dict
            categories = [cl.attrib['title'].split(':', 1)[1] for cl in page.findall('categories/cl')]
            title_data[page.attrib['title']].append(repr(categories))
[ "def get_news_with_categories():\n # saving articles urls in list\n articles = ['https://inshorts.com/en/read/business','https://inshorts.com/en/read/sports','https://inshorts.com/en/read/technology','https://inshorts.com/en/read/entertainment']\n # creating empty list\n l = []\n # iterating through each url\n for url in articles:\n # saving response from url page\n response = get(url)\n # parsing through response\n soup = BeautifulSoup(response.text)\n # finding all news cards within response\n cards = soup.find_all('div', class_='news-card')\n # iterating through cards\n for card in cards:\n # creating new dictionary\n d = {}\n # saving article title and body text\n title = (card.find('span', itemprop='headline').text)\n body = (card.find('div', itemprop='articleBody').text)\n # saving title and body text to dictionary\n d['title'] = title\n d['article'] = body\n d['category'] = url[29:]\n # adding dictionary to list\n l.append(d)\n # returning list\n return l", "def pagetitles_of_category(category):\n url = 'https://de.wikipedia.org/w/api.php?action=query&&format=json&list=categorymembers&cmprop=title&cmlimit=max&cmtitle=' + category\n query = requests.get(url).json()['query']['categorymembers']\n titlelist = []\n start = 1\n if category == 'Kategorie:Vogel des Jahres (Deutschland)':\n start = 1\n for i in range(start, len(query) - 1):\n titlelist.append(query[i]['title'])\n return titlelist", "def get_categories():\r\n return VIDEOS.iterkeys()", "def get_category_titles_from_each_page(list_of_urls):\n titles = []\n\n print('Retrieving data for each category:')\n with progressbar.ProgressBar(max_value=len(list_of_urls)) as bar:\n for counter, url in enumerate(list_of_urls):\n category_page = urlopen(url)\n scrape_data = BeautifulSoup(\n category_page, \"html.parser\") # BeatifulSoup Object\n title = scrape_data.h1.text\n titles.append(title)\n bar.update(counter)\n return titles", "def splitIntoCategories(tweets, categories):\n listDictionary = []\n\n #create category arrays\n for x in categories: #category dictionary\n listDictionary = listDictionary + splitTweets(x, tweets, [x.name])\n\n return listDictionary", "def APcats():\n\tAPkey = AP_keys[\"breaking-news\"]\n\tbase = \"Http://developerapi.ap.org/v2/categories.svc/?apiKey=%s\"\n\tr = requests.get(base % APkey)\n\tsoup = BeautifulSoup.BeautifulSoup(r.content, convertEntities=['xml', 'html'])\n\tfor entry in soup.findAll('entry'):\n\t\tname = str(entry.title.string)\n\t\tid = str(entry.id.string).split(':')[-1]\n\t\tyield \"%s,%s\" % (id, name)", "def getCurrentCats(imagepage):\n result = []\n for cat in imagepage.categories():\n result.append(cat.title(with_ns=False))\n return list(set(result))", "def get_category_id_2_title(self):\n pages_table = Tables.PageTable(self.P.pages)\n return dict(pages_table.select_id_title_of_categories())", "def format_categories(self, cat: str) -> Dict[str, str]:\n regex = \"<li><a href=\\\"(.*?)\\\" title=\\\"(.*?)\\\">(.*?)</a></li>\"\n cats = [{\"link\": x[0],\n \"category_name\": self.clean_html_encodings(x[1]),\n \"display_name\": self.clean_html_encodings(x[2])}\n for x in re.compile(regex).findall(cat)][0]\n return cats", "def getTitlesNLP(self,collection):\n #titles = collection.find({}, {\"title\": 1})\n titles = collection.find()\n tmp = []\n for d in titles:\n st = ''\n for ing in d['ingredients']:\n st+= ' ' + ing['name']\n\n #print st\n tmp.append(d['title'] + st)\n # print d\n return tmp", "def get_categories():\n return VIDEOS.keys()", "def categories_show(self):\n\n cursor = 
DatabaseManager.connection_to_database(self)\n\n cursor.execute(\"SELECT * FROM category\")\n\n my_results = cursor.fetchall()\n\n i = 1\n cat_list = []\n for cat_tuples in my_results:\n for cat_str in cat_tuples:\n cat_list2 = []\n cat_list2.append(i)\n cat_list2.append(cat_str)\n i += 1\n cat_list.append(cat_list2)\n\n for cat_list2 in cat_list:\n print(cat_list2)", "def chapters(self, title):\n r = requests.get(\"https://www.baka-tsuki.org/project/index.php?title={}\".format(title.replace(\" \", \"_\")),\n headers=self.header)\n if r.status_code != 200:\n raise requests.HTTPError(\"Not Found\")\n else:\n parsed = soup(r.text, 'html.parser')\n dd = parsed.find_all(\"a\")\n volumes = []\n for link in dd:\n if 'class' in link.attrs:\n if 'image' in link.get('class'):\n continue\n if 'href' in link.attrs:\n if re.search(self.chapter_regex, link.get('href')) is not None and not link.get('href').startswith('#'):\n volumes.append(link)\n seplist = OrderedDict()\n for item in volumes:\n result = re.search(self.separate_regex, item.get('title').lower())\n if result.group('chapter').lstrip('0') in seplist:\n seplist[result.group('chapter').lstrip('0')].append([item.get('href'), item.get('title')])\n else:\n seplist[result.group('chapter').lstrip('0')] = [[item.get('href'), item.get('title')]]\n return seplist", "def init_categories(self):\n table = self.bsoup.find(\"div\", {\"id\" : \"charts-list\"})\n current_cat = ''\n for child in table.children:\n name = child.name\n if name == 'h3':\n current_cat = child.get_text()\n self.categories[current_cat] = dict()\n elif name == 'article':\n chart = child.findChild(\"a\", {\"class\" : \"chart-row__chart-link\"})\n self.categories[current_cat][chart.get_text()] = chart['href']", "def getCategorias(self):\n database = self.database\n sql = \"SELECT idCategoria,Nombre FROM hermes.categoria;\"\n data = database.executeQuery(sql)\n lista = self.listToDicc(data)\n return lista", "def get_category_title_2_id(self):\n pages_table = Tables.PageTable(self.P.pages)\n return dict(FlipIt(pages_table.select_id_title_of_categories()))", "def get_cvat_categories(self):\n cvat_cats = []\n for cat_meta in self.cats.values():\n cvat_cats.append({\"name\": cat_meta[\"name\"], \"color\": \"\", \"attributes\": []})\n return cvat_cats", "def get_all_category(self):\n categories = Category.objects.all()\n return categories", "def getCategorySpecs(self, field, category_title):\n pc = getToolByName(self, 'portal_catalog')\n services = []\n for spec in field.getResultsRange():\n service = pc(portal_type='AnalysisService',\n getKeyword=spec['keyword'])[0].getObject()\n if service.getCategoryName() == category_title:\n services.append(spec)\n return services" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
downloads all images from the url_list into the Images directory
def download_images(url_list):
    print("\nDownloading images into Images folder:")
    length = len(url_list)
    for index, url in enumerate(url_list):  # download all images
        progress_update(index, length)
        name = url.split('/')[-1]
        if len(name) > 250:  # change name if name is too long
            name = name[0:50] + name[-4:]
        try:  # download file to Images dir
            urllib.request.urlretrieve(url, "Images/"+name)
        except ValueError:  # catch ValueError
            pass
        except urllib.error.HTTPError:  # catch HTTPError
            pass
    progress_update(length, length)
[ "def download_images(self):\n \n if not os.path.exists(self.images_folder):\n os.makedirs(self.images_folder)\n print(f\"{Fore.GREEN}[+]{Style.RESET_ALL} `{self.images_folder}` folder created.\")\n \n for url in self.images(link=True):\n content = requests.get(url).content\n filename = url.split('/')[-1]\n filepath = os.path.join(self.images_folder, filename)\n \n if not os.path.exists(filepath):\n with open(filepath, mode=\"wb\") as file:\n file.write(content)\n print(f\"{Fore.GREEN}[+]{Style.RESET_ALL} {filename} downloaded.\")", "def downloading_all_photos(self):\n self.create_folder()\n pic_counter = 1\n for url_link in self.pic_url_list:\n print(pic_counter)\n pic_prefix_str = self.g_search_key + \"/\" + self.g_search_key + str(pic_counter)\n self.download_single_image(url_link.encode(), pic_prefix_str)\n pic_counter = pic_counter + 1", "def download_images(img_urls, dest_dir):\n #print dest_dir, img_urls\n try:\n full_path = os.path.abspath( dest_dir )\n except:\n print '*Directory error:', dirname\n sys.exit(1)\n #print 'full_path: ', full_path\n try:\n if not os.path.exists(full_path) :\n #print 'making directory:', full_path\n os.makedirs(full_path)\n except:\n print \"*Cannot make directory: \", full_path\n sys.exit(1)\n \n count = 0\n filename = 'img'\n for url in img_urls :\n basename = 'img' + str(count)\n filename = full_path + '/' + basename\n count += 1\n #print 'copy from :', url, '\\nto: ', filename\n print '.',\n try:\n urllib.urlretrieve(url, filename)\n #shutil.copy(filename, full_path)\n except:\n print \"\\n*File download error: from \", url, '\\n to ', filename\n #sys.exit(1)\n\n # write an html file with the images referred from the url's\n # do this instead of making references to local file images because\n # the VM has some issue with Python urllib open and it takes\n # several minutes per operation to perform or it just fails 100% of the time\n header = \"\"\"<verbatim>\n<html>\n<body>\n\"\"\"\n footer = \"\"\"\n</body>\n</html>\n\"\"\" \n file_handle_web = open('index_web.html', 'w')\n file_handle_web.write( header )\n\n for url in img_urls:\n file_handle_web.write( '<img src=' + url + '>')\n\n file_handle_web.write( footer )\n file_handle_web.close()\n\n #\n # continued development on an non VM and urllib is workable\n #\n # write html file to reference images in directory\n file_list = sorted(os.listdir( full_path ), key=key_fname)\n #print file_list\n file_handle_file = open('index_file.html', 'w')\n file_handle_file.write( header )\n\n for file in file_list:\n file_handle_file.write( '<img src=' + full_path + '/' + file + '>')\n\n file_handle_file.write( footer )\n file_handle_file.close()", "def download_imgs(img_urls: List[str]) -> str:\n tmp_dir = tempfile.mkdtemp()\n\n for img_url in img_urls:\n leaf_name = urlparse(img_url).path.split('/')[-1]\n local_path = os.path.join(tmp_dir, leaf_name)\n urlretrieve(img_url, filename=local_path)\n\n return tmp_dir", "def download_image(imageList, name, ddir):\n for i, image in enumerate(imageList):\n wget.download(image, out= ddir + str(name + '_' +str(i)) + '.jpg')", "def download_images(img_urls, dest_dir):\n imgIndex = 0\n if not(os.path.exists(dest_dir)):\n os.makedirs(dest_dir)\n for thisURL in img_urls:\n #print thisURL #TESTING\n outFile = dest_dir + \"/img\" + str(imgIndex)\n print(\"Retrieving: img\" + str(imgIndex))\n urllib.urlretrieve(thisURL, outFile)\n imgIndex += 1\n indexFOut = open(dest_dir + \"/index.html\", 'w')\n indexFOut.write(\"<verbatim>\\n<html>\\n<body>\\n\")\n for thisIndex in 
xrange(imgIndex): #already +1 from last loop before\n indexFOut.write('<img src=\"' + os.path.abspath(dest_dir + \"/img\" + str(thisIndex)) + '\">')\n indexFOut.write(\"\\n</body>\\n</html>\\n\")\n indexFOut.close()", "def collect_images(self):\n self.__get_images_link()\n url_length: int = len(self.img_url_list)\n self.logger.info(\"Starting downloading for {} images...\".format(url_length))\n for url, index in zip(self.img_url_list, range(url_length)):\n state, image, shape = self.download_image(url, index)\n self.logger.debug({\"url\": url, \"timestamp\": datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")})\n self.documents.append({\"url\": url,\n \"image\": self.encode_image(image),\n \"shape\": shape,\n \"state\": state,\n \"timestamp\": datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")})\n return self.documents", "def download_images(self, img_urls, dest_dir):\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n \n index = file(os.path.join(dest_dir, 'overview.html'), 'w')\n index.write('<html><body>\\n')\n \n for img_url in img_urls:\n \n img_name = img_url.split('/')[-1]\n img_name = re.sub('[^0-9a-zA-Z]+', '_', img_name.split('.')[-2]) + '.' + img_url.split('.')[-1]\n try:\n response = requests.get(img_url, stream=True)\n with open(dest_dir + '/' + img_name, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file) \n \n index.write('<img src=\"%s\"><p>/n\"%s\"</p>' % (img_name,img_name,))\n \n except Exception as e:\n print e\n \n index.write('\\n</body></html>\\n')\n index.close()", "def get_images(self):\r\n\r\n image_index = 0\r\n line_index = 1\r\n for link in self.raw_list:\r\n\r\n try:\r\n self.get_one_image(link, image_index)\r\n except ValueError:\r\n logging.warning(\"Line {}: broken or unsafe URL format.\".format(line_index))\r\n except urllib.request.URLError:\r\n logging.warning(\"Line {}: {} -- URL error.\".format(line_index, link))\r\n except TypeError:\r\n logging.warning(\"Line {}: {} -- does not point to an image.\".format(line_index, link))\r\n else:\r\n image_index += 1\r\n\r\n line_index += 1\r\n\r\n logging.info(\"------------- Download finished -------------\\n\")", "def download_images(search, n):\n if not os.path.exists('images'):\n os.mkdir('images')\n tagdir = os.path.join('images', search)\n if not os.path.exists(tagdir):\n os.mkdir(tagdir)\n for url in search_images(search, n):\n r = requests.get(url)\n fname = url.rsplit('/')[-1]\n dest = os.path.join(tagdir, fname)\n # print(\"downloading %s => %s\" % (url, dest))\n sys.stdout.write('+')\n sys.stdout.flush()\n with open(dest, 'wb') as f:\n f.write(r.content)", "def download_files(directory, url_list):\n\n for url in url_list:\n file = directory + url.split(\"/\", -1)[-1]\n try:\n urlreq.urlretrieve(url, file)\n except URLError as e:\n print(e)", "def download_images(self, blobs: List[str], path: Union[str, Path]):\n for blob_name in blobs:\n try:\n self.download_image(blob_name, path)\n except Exception as e:\n print(f'Downloading {blob_name} failed!')\n print(e)", "def download_files(urls, save_dir=\"tmp/\"):\n for url in urls:\n download_file(url, save_dir, None)", "def scrape_images(url):\n res = requests.get(url)\n assert res.status_code == 200\n\n regex = re.compile('<img (?:(?:.|\\n)*?)src=\"(.*?([^/]*?))\"[\\s/]*>')\n matches = regex.findall(res.text)\n for image_with_relative_url, image_file_name in matches:\n res_image = requests.get(url + image_with_relative_url)\n assert res.status_code == 200\n\n with open(image_file_name, 'wb') as f:\n f.write(res_image.content)", "def 
make_image_list(image_dir):", "def save_images(links, search_name):\r\n directory = search_name.replace(' ', '_')\r\n if not os.path.isdir(directory):\r\n os.mkdir(directory)\r\n\r\n for i, link in enumerate(links):\r\n savepath = os.path.join(directory, '{:06}.png'.format(i))\r\n ulib.urlretrieve(link, savepath)", "def download_images():\n if not os.path.exists(FLOWERS_DIR):\n DOWNLOAD_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'\n print('Downloading flower images from %s...' % DOWNLOAD_URL)\n urllib.request.urlretrieve(DOWNLOAD_URL, 'flower_photos.tgz')\n get_ipython().system('tar xfz flower_photos.tgz')\n print('Flower photos are located in %s' % FLOWERS_DIR)\n print(os.getcwd())\n print(os.path.abspath(FLOWERS_DIR))", "def _get_images(ig_url, filename):\n # extensions = ('.png', '.jpg', '.jpeg', '.gif', '.tiff', '.bmp',)\n # vid_extensions = ('.mp4', '.mpeg', '.mpg', '.m4p', '.m4v', '.mp2', '.avi',)\n response = requests.get(ig_url)\n app.logger.debug(response)\n soup = bs(response.text, \"html.parser\")\n app.logger.debug(soup)\n images = [img.get('src') for img in soup.findAll('img') if not re.search(\"^\\/\", img.get('src'))]\n app.logger.debug(images)\n goal, bonus = len(images), 0\n file_count = 1\n for image in images:\n # TODO: The following steps are not fully implemented.\n # Check if the src pointed to actual images, or a web page\n # 1) regex to grab the file extension\n name, ext = path.splitext(image)\n # 2) if file extension exists, confirm it matches known image extensions.\n if ext:\n # extension = 'png' # example, but actually set according to a.\n # a) set the output filename to have the same file extension as original file.\n urllib.request.urlretrieve(image, f\"{filename}_{file_count}.{ext}\")\n else:\n # 3) if no file extension or doesn't match known extensions, assume a web page view.\n recur_goal, recur_found = _get_images(image, f\"filename_{file_count}\")\n goal += recur_goal\n bonus += recur_found\n file_count += 1\n return (goal, file_count + bonus)", "def load_multiple_images(self, filepath_list):\n self.image = Image.from_multiples(filepath_list)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a timedelta object of n days
def days(n):
    return timedelta(days=n)
[ "def seconds2days(n):\n days = n / 60 / 60 / 24\n return days", "def delta(value, arg):\n return value + timedelta(days=arg)", "def number_of_days(iteration):\r\n return iteration // 24", "def minusndays(date,n):\n \n date_format = \"%Y-%m-%d\"\n return (datetime.strptime(date,date_format) - timedelta(n)).strftime(date_format)", "def test_n_days(self):\n today = datetime.date.today()\n self.assertEqual([today - datetime.timedelta(days=3)], parse(\"foo 3 days back bar\"))\n self.assertEqual([today - datetime.timedelta(days=10)], parse(\"foo 10 days ago bar\"))\n self.assertEqual([today + datetime.timedelta(days=3)], parse(\"foo in 3 days bar\"))\n self.assertEqual([today + datetime.timedelta(days=10)], parse(\"foo in 10 days bar\"))\n\n self.assertEqual([today + datetime.timedelta(days=10),\n today - datetime.timedelta(days=3)],\n parse(\"foo in 10 days and 3 days back bar\"))\n self.assertEqual([], parse(\"foo in 10 days ago bar\"))\n\n self.assertEqual([], parse(\"foo in a while bar\"))\n self.assertEqual([], parse(\"foo short while ago bar \"))\n\n self.assertEqual([today + datetime.timedelta(days=1)], parse(\"foo in a day bar\"))\n self.assertEqual([today - datetime.timedelta(days=1)], parse(\"foo a day ago bar\"))\n self.assertEqual([today - datetime.timedelta(days=1)], parse(\"foo a day back bar\"))\n self.assertEqual([], parse(\"foo next a day bar\"))\n self.assertEqual([], parse(\"foo in a day ago bar\"))\n self.assertEqual([], parse(\"foo in a day back bar\"))", "def days():\n return hours() % 24", "def previous_days(n, before=None):\n before = before or pendulum.today()\n return (before - before.subtract(days=n)).range('days')", "def ndays(nmonth=3):\n today0 = datetime.now()\n year3, month3 = (today0.year, today0.month - nmonth) if today0.month - nmonth >= 1 \\\n else (today0.year - 1, today0.month - nmonth + 12)\n date3 = datetime(year3, month3, today0.day)\n ndays = (today0 - date3).days\n\n return ndays", "def timedelta(td):\n return format_timedelta(td)", "def ntradingdays():\n return 252*10", "def get_days_diff(input_date: tuple) -> int:\n return (datetime.now()-datetime(*input_date)).days", "def nex_7_days():\n holder=[]\n base = datetime.datetime.today()\n for x in range(0, 7):\n holder.append(base + datetime.timedelta(days=x))\n return holder", "def delta_days(filename, folder, cfg):\n archives = archives_create_days(folder, cfg['pattern'])\n if archives:\n last_archive_day = list(archives.keys())[-1]\n return (file_create_day(filename) - last_archive_day).days", "def get_nth_date(n = 0, ago = False, date= \"\"):\n\n n = int(n)\n if date:\n date = datetime.datetime.strptime(date, '%Y-%m-%d') \n else:\n date = datetime.datetime.now()\n\n if ago:\n return (date - datetime.timedelta(days=n)).strftime(\"%Y-%m-%d\")\n return (date + datetime.timedelta(days=n)).strftime(\"%Y-%m-%d\")", "def test_n_days_ref(self):\n today = datetime.date.fromtimestamp(259200000)\n self.assertEqual([today - datetime.timedelta(days=3)], parse(\"foo 3 days back bar\", self.ref))\n self.assertEqual([today - datetime.timedelta(days=10)], parse(\"foo 10 days ago bar\", self.ref))\n self.assertEqual([today + datetime.timedelta(days=3)], parse(\"foo in 3 days bar\", self.ref))\n self.assertEqual([today + datetime.timedelta(days=10)], parse(\"foo in 10 days bar\", self.ref))\n\n self.assertEqual([today + datetime.timedelta(days=10),\n today - datetime.timedelta(days=3)],\n parse(\"foo in 10 days and 3 days back bar\", self.ref))\n self.assertEqual([], parse(\"foo in 10 days ago bar\", self.ref))\n\n 
self.assertEqual([], parse(\"foo in a while bar\", self.ref))\n self.assertEqual([], parse(\"foo short while ago bar \", self.ref))\n\n self.assertEqual([today + datetime.timedelta(days=1)], parse(\"foo in a day bar\", self.ref))\n self.assertEqual([today - datetime.timedelta(days=1)], parse(\"foo a day ago bar\", self.ref))\n self.assertEqual([today - datetime.timedelta(days=1)], parse(\"foo a day back bar\", self.ref))\n self.assertEqual([], parse(\"foo next a day bar\", self.ref))\n self.assertEqual([], parse(\"foo in a day ago bar\", self.ref))\n self.assertEqual([], parse(\"foo in a day back bar\", self.ref))", "def seconds2days(s):\n return s / seconds_per_day", "def dyn_adjust_time_period(n_urls_received, range_days):\n if n_urls_received in [0, 1]:\n range_days = int(range_days * 2)\n elif n_urls_received in [2, 3]:\n range_days = int(range_days * 1.5)\n elif n_urls_received in [4]:\n pass\n elif n_urls_received in range(5, 7):\n range_days = int(range_days / 1.5)\n elif n_urls_received in range(7, 11):\n range_days = int(range_days / 2)\n range_days = max(2, range_days)\n range_days = min(180, range_days)\n\n return range_days", "def _get_timedelta(self, quantity: int, forward: bool = True) -> timedelta:\n if forward:\n multiplier = 1\n else:\n multiplier = -1\n if self.unit.startswith('minute'):\n return timedelta(minutes=(int(self.value) * quantity * multiplier))\n elif self.unit.startswith('hour'):\n return timedelta(hours=(int(self.value) * quantity * multiplier))\n elif self.unit.startswith('day'):\n return timedelta(days=(int(self.value) * quantity * multiplier))\n else:\n raise ValueError", "def test_as_days(self):\n self.assertEqual(1, Duration(65 * 60 * 24).as_days)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }