Column           Type             Min         Max
query            stringlengths    9           3.4k
document         stringlengths    9           87.4k
metadata         dict
negatives        sequencelengths  4           101
negative_scores  sequencelengths  4           101
document_score   stringlengths    3           10
document_rank    stringclasses    102 values
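Each row pairs a natural-language query with a positive code document, a list of hard negatives, their scores, and the score/rank of the positive. A minimal sketch of loading and inspecting one record with the Hugging Face datasets library follows; the repository id used below is a placeholder, not the real dataset path.

from datasets import load_dataset

# Placeholder repository id; substitute the actual dataset path.
ds = load_dataset("org/code-retrieval-triplets", split="train")

record = ds[0]
print(record["query"])                # docstring-style natural-language query
print(record["document"][:200])       # positive code snippet (truncated)
print(len(record["negatives"]))       # between 4 and 101 hard negatives
print(record["negative_scores"][:5])  # scores aligned with the negatives
print(record["document_score"], record["document_rank"])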
True if target and current object are equal and have the same parent. Equal means same mesh, same shape and same domain.
def is_consistent_with(self, target):
    same_parent = self.parent() == target.parent()
    # Note FP. Is it really required to have the
    # same parent? Inclusion of all proc may be enough?
    return npw.equal(self.shape, target.shape).all() and same_parent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n return self.mesh == other.mesh and \\\n npw.equal(self.shape, other.shape).all() and \\\n self.domain == other.domain", "def __eq__(self, other):\n parent_same = self.parent1.rid == other.parent1.rid \\\n and self.parent2.rid == other.parent2.rid\n\n parents_opposite = self.parent2.rid == other.parent1.rid \\\n and self.parent1.rid == other.parent2.rid\n\n return parent_same or parents_opposite", "def is_same_as(self, other) -> bool:\n return self.x == other.x and self.y == other.y", "def __eq__(self, other):\n return np.all(self.grid == other.grid) and np.all(self.pos == other.pos)", "def __eq__(self, other):\n if type(other) is type(self):\n return (self.x == other.x and self.y == other.y and self.z == other.z)\n return False", "def __eq__(self, obj):\r\n return (self.position == obj.position and self.left_cont == obj.left_cont\r\n and self.line == obj.line and self.right_cont == obj.right_cont)", "def __eq__(self, other):\n return type(self) == type(other) and self.node is other.node", "def is_same(self: _R, other: _R) -> bool:\n children = [i.render() for i in self.children]\n other_children = [i.render() for i in other.children]\n return other_children == children", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.puzzle == other.puzzle and\n all([x in self.children for x in other.children]) and\n all([x in other.children for x in self.children]))", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.n == other.n and self.m == other.m and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid)", "def isSameKindAs(self, *args):\n return _osgAnimation.RigGeometry_isSameKindAs(self, *args)", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def isSameKindAs(self, *args):\n return _osgAnimation.Bone_isSameKindAs(self, *args)", "def eq(self, other: Any) -> bool:\n # TODO: Rasswanth: Fix later after the comparison operation\n # relative\n # from .... 
import Tensor\n\n # if (\n # isinstance(self.child, Tensor)\n # and isinstance(other.child, Tensor)\n # and (self.child != other.child).child.any() # type: ignore\n # ):\n # return False\n\n # if (\n # isinstance(self.child, np.ndarray)\n # and isinstance(other.child, np.ndarray)\n # and (self.child != other.child).any()\n # ):\n # return False\n\n # if self.rank != other.rank:\n # return False\n\n # if self.ring_size != other.ring_size:\n # return False\n\n # if self.nr_parties != other.nr_parties:\n # return False\n\n # return True\n\n # ATTENTION: Why are we getting here now when we never did before?\n if not hasattr(other, \"child\"):\n return self.child == other\n\n return self.child == other.child", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def identical_to(self, elem):\n \n return (self.n == elem.n) and (math.fabs(self.dx - elem.dx) < 0.001) and (math.fabs(self.dy - elem.dy) < 0.001) and (math.fabs(self.dz - elem.dz) < 0.001)", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid)", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid)", "def equals(self, other: InputTransform) -> bool:\n if hasattr(self, \"indices\") == hasattr(other, \"indices\"):\n if hasattr(self, \"indices\"):\n return (\n super().equals(other=other)\n and (self._d == other._d)\n and (self.indices == other.indices).all()\n )\n else:\n return super().equals(other=other) and (self._d == other._d)\n return False", "def __eq__(self, other):\n return (type(self) == type(other) and\n (self.from_grid == other.from_grid) and\n (self.to_grid == other.to_grid))", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid and\n self.m == other.m and\n self.n == other.n)", "def equal(self,other):\n if(self.x == other.x) and (self.y == other.y):\n return True\n else:\n return False", "def _node_equal(self, other):\n # We're not equal if other isn't a Node, or if other is a different class.\n if not isinstance(other, Node) or not isinstance(other, self.__class__):\n return False\n # Loop through all children, checking whether they are equal\n for self_child, other_child in zip(self.getChildren(), other.getChildren()):\n if not self_child == other_child:\n return False\n # If we get here, our two nodes much be equal\n return True", "def inside_itself(self):\n for i in range(2, len(self.nodes)):\n if self.nodes[0] == self.nodes[i]:\n return True\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.x == other.x and self.y == other.y\n return False", "def identical_grid(self, other) -> bool:\n return (\n (\n self.crs is None\n or other.raster.crs is None\n or self.crs == other.raster.crs\n )\n and np.allclose(self.transform, other.raster.transform, atol=1e-06)\n and np.allclose(self.shape, other.raster.shape)\n )", "def equals(self, other: InputTransform) -> bool:\n return (\n super().equals(other=other)\n and self.approximate == other.approximate\n and self.tau == other.tau\n )", "def independent(self) -> bool:\n parent = self._parent()\n if parent is None:\n return True\n connections = parent._graph.connections\n path = self._path\n lp = len(path)\n for con in connections:\n if con[\"type\"] == \"connection\":\n if con[\"target\"][:lp] == path:\n return False\n return True", "def 
can_communicate_with(self, target):\n if self == target:\n return True\n msg = 'You try to connect topologies belonging to'\n msg += ' two different mpi tasks. Set taskids properly or use'\n msg += ' InterBridge.'\n assert self.task_id() == target.task_id(), msg\n\n # Parent communicator\n # Todo : define some proper conditions for compatibility\n # between topo_from, topo_to and parent:\n # - same size\n # - same domain\n # - common processus ...\n # At the time we check that both topo have\n # the same comm_origin.\n return self.is_consistent_with(target)", "def __eq__(self, other):\n\n # Attributes defining the instance\n ckeys = ['x0', 'y0', 'nx', 'ny', 'dx', 'dy', 'origin']\n\n a = dict((k, getattr(self.corner_grid, k)) for k in ckeys)\n b = dict((k, getattr(other.corner_grid, k)) for k in ckeys)\n p1 = self.corner_grid.proj\n p2 = other.corner_grid.proj\n return (a == b) and proj_is_same(p1, p2)", "def __eq__(self, other: 'Tree') ->bool:\n return (type(self) is type(other) and\n self.value == other.value and\n self.children == other.children)", "def equals(self,other):\n return self._x == other.get_x() and self._y == other.get_y()", "def __eq__(self, other):\n return (\n self.constrain == other.constrain and\n self.width == other.width and\n self.height == other.height and\n self.upscale == other.upscale\n )", "def equals(self, other: InputTransform) -> bool:\n return super().equals(other=other) and (self.reverse == other.reverse)", "def e_paralelo(self, other):\n if (self == other) or (self.normaliza() == other.normaliza()):\n return True\n else:\n return False", "def is_same_branch(self, other):\n if self.id == other.id:\n return True\n elif self.is_descendant_of(other) or other.is_descendant_of(self):\n return True\n else:\n return False", "def __eq__(self, other):\r\n return abs(self.x - other.x) + abs(self.y - other.y) < Vertex.epsilon", "def is_identical(self, tree1, tree2):\r\n if not tree1 and not tree2:\r\n return True\r\n elif tree1 and tree2:\r\n return (tree1.root == tree2.root and self.is_identical(tree1.left,tree2.left) and self.is_identical(tree1.right, tree2.right))\r\n else:\r\n return False", "def __eq__(self, other: 'Origin') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __eq__(self, other) -> bool:\n if not isinstance(other, NilpotentOrbit):\n return False\n if self.my_type != other.my_type:\n return False\n if self.lie_rank != other.lie_rank:\n return False\n if self.decorator != other.decorator:\n return False\n return self.my_diagram == other.my_diagram", "def identical_to(self, elem):\n\n return (self.n1 == elem.n1) and (self.n2 == elem.n2)", "def __eq__(self, other):\n if not isinstance(other, Model):\n return False\n return self.graph == other.graph", "def __eq__(self, other: Vertex) -> bool:\n if isinstance(other, self.__class__):\n return self.id == other.id and self.edges == other.edges\n return False", "def __eq__(self, rhs):\n return self.x == rhs.x and self.y == rhs.y", "def __eq__(self, other):\n return self.position.data == other.position.data and \\\n self.velocity.data == other.velocity.data", "def check_objects_have_parents_and_are_not_referenced_twice(self):\n # logger.debug(\"ports:\")\n all_objs = crest.get_all_ports(self.model)\n # for o in all_objs:\n # print(o._name, o._parent)\n for obj in all_objs:\n assert all_objs.count(obj) == 1, f\"Port {obj._name} has been used multiple times\"\n assert 
obj._parent is not None, f\"Port {obj._name} has no parent definition\"\n\n # logger.debug(\"states:\")\n all_objs = crest.get_all_states(self.model)\n # for o in all_objs:\n # print(o._name, o._parent)\n for obj in all_objs:\n assert all_objs.count(obj) == 1, f\"State {obj._name} has been used multiple times\"\n assert obj._parent is not None, f\"State {obj._name} has no parent definition\"\n\n # logger.debug(\"updates:\")\n all_objs = crest.get_all_updates(self.model)\n # for o in all_objs:\n # print(o._name, o._parent)\n for obj in all_objs:\n assert all_objs.count(obj) == 1, f\"Update {obj._name} has been used multiple times\"\n assert obj._parent is not None, f\"Update {obj._name} has no parent definition\"\n\n # logger.debug(\"influences\")\n all_objs = crest.get_all_influences(self.model)\n # for o in all_objs:\n # print(o._name, o._parent)\n for obj in all_objs:\n assert all_objs.count(obj) == 1, f\"Influence {obj._name} has been used multiple times\"\n assert obj._parent is not None, f\"Influence {obj._name} has no parent definition\"\n\n # logger.debug(\"transitions:\")\n all_objs = crest.get_all_transitions(self.model)\n # for o in all_objs:\n # print(o._name, o._parent)\n for obj in all_objs:\n assert all_objs.count(obj) == 1, f\"Transition '{obj._name}' has been used multiple times\"\n assert obj._parent is not None, f\"Transition '{obj._name}' has no parent definition\"", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.base_location == other.base_location and\n self.fold_path == other.fold_path and\n self.field == other.field)", "def __eq__(self, other) -> bool:\n if not isinstance(other, self.__class__):\n return False\n\n if self.number_of_nodes() != other.number_of_nodes():\n return False\n if self.number_of_edges() != other.number_of_edges():\n return False\n\n if list(self.nodes) != list(other.nodes):\n return False\n\n # Compare node data.\n for i in self.nodes:\n # We may want to exclude the 'name' attribute from comparisons, assuming\n # it has no logical meaning.\n if self.nodes[i] != other.nodes[i]:\n return False\n\n if list(self.edges) != list(other.edges):\n return False\n\n for i, j in self.edges:\n # Compare edge data.\n if self.edges[i, j] != other.edges[i, j]:\n return False\n\n return True", "def __eq__(self, other) -> bool:\n # If the other is a self, and points and steps match (not necessarily in same order), then equal\n return isinstance(other, Construction) and self.steps_set == other.steps_set", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def can_merge(self, target):\n if self.parentnode_id != target.parentnode_id:\n raise ValidationError(\n gettext_lazy('Cannot merge self into target, self and target is not part of same AssignmentGroup')\n )", "def __eq__(self, other):\n if isinstance(other, Quaternion):\n return self.__real == other.__real and (self.__img == other.__img).all()\n return False", "def is_equal(self, a, b):\n return a.X[0] == b.X[0]", "def master(self):\n return self.depth == 0", "def is_equal (self, p):\n assert isinstance (p, Problem), \"Must be comparing two of same type\"\n if self.objects != p.objects:\n print(\"objects\")\n return False\n\n if self.init != p.init:\n #print \"init\"\n #print \"*self*\"\n #print self.init\n #print \"*p*\"\n #print p.init\n return False\n\n if self.goal != p.goal:\n print(\"goal\")\n return False\n\n if not all ([sa == pa for sa, pa in zip (self.actions, p.actions)]):\n print(\"actions\")\n return False\n\n if not all ([sp == pp for sp, pp in 
zip (self.predicates, p.predicates)]):\n print(\"predicates\")\n return False\n\n if self.types != p.types or self.parent_types != p.parent_types:\n print(\"types\")\n return False\n\n return True", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other_vertex):\n return self.name == other_vertex.name and self.state == other_vertex.state", "def __equals__(self, to_compare):\n try:\n # Try to compare - this likely fails when it is compared to a non\n # Quaternion object\n return \\\n (self.w == to_compare.w) and \\\n (self.x == to_compare.x) and \\\n (self.y == to_compare.y) and \\\n (self.z == to_compare.z)\n\n except AttributeError:\n return False", "def __eq__(self, other):\n if not type(other) == type(self):\n return False\n sedges, oedges = self.edges, other.edges\n return ((len(sedges) == len(oedges)) and\n all(numpy.all(se == oe) for (se, oe) in zip(sedges, oedges)))", "def is_identical(self, other):\n if self.is_input != other.is_input:\n return False\n\n if self.is_raw() and other.is_raw():\n return True\n if self.is_raw() or other.is_raw():\n return False\n return self.structure.is_identical(other.structure)", "def testParent(self):\n self.assertEqual(\n self.parent,\n self.mr.parent\n )", "def identical_to(self, elem):\n\n return (self.n1 == elem.n1) and (self.n2 == elem.n2) and (self.n3 == elem.n3) and (self.n4 == elem.n4)", "def __eq__(self, other):\n return type(self) == type(other) and self._full_path == other.full_path", "def __eq__(self, other: 'OriginInput') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "def testControlHopperContainerEqual(self):\n\t\tgenome1 = hopper_utils.ControlHopperContainer()\n\t\tgenome1.rear_mass = 4.0\n\t\tgenome1.mid_mass = 2.0\n\t\tgenome1.front_mass = 1.0\n\t\tgenome2 = hopper_utils.ControlHopperContainer()\n\t\tgenome2.rear_mass = 4.0\n\t\tgenome2.mid_mass = 2.0\n\t\tgenome2.front_mass = 1.0\n\t\tself.failIf(genome1 == genome2)", "def equals(self, other: InputTransform) -> bool:\n if hasattr(self, \"indices\") == hasattr(other, \"indices\"):\n if hasattr(self, \"indices\"):\n return (\n super().equals(other=other)\n and (self._d == other._d)\n and (self.learn_bounds == other.learn_bounds)\n and (self.indices == other.indices).all()\n )\n else:\n return (\n super().equals(other=other)\n and (self._d == other._d)\n and (self.learn_bounds == other.learn_bounds)\n )\n return False", "def __eq__(self, other):\n if type(other) != type(self):\n return False\n # Check the properties inherited from Data\n if not super(DataND, self).__eq__(other):\n return False\n if other.axes != self.axes:\n return False\n if other.normalizations != self.normalizations:\n return False\n if other.FTparameters != self.FTparameters:\n return False\n if not array_equal(other.values, self.values):\n return False\n return True", "def _is_node_identical(self, job_name_a, job_name_b):\n\n node_a = self._graph_a.get_node(job_name_a)\n node_b = self._graph_b.get_node(job_name_b)\n\n # Check for same job type name and version\n if node_a.job_type_name != node_b.job_type_name or node_a.job_type_version != node_b.job_type_version:\n return False\n\n # Check that A and B have matching parents that are identical to one another\n a_parent_names = set(a_parent.node_name for a_parent in node_a.parents)\n for b_parent in node_b.parents:\n b_parent_name = b_parent.node_name\n if b_parent_name not in self._identical_nodes:\n return 
False # B has a parent that is not identical to any other node\n matched_a_parent_name = self._identical_nodes[b_parent_name]\n if matched_a_parent_name not in a_parent_names:\n return False # B has a parent that does not match a parent of A\n a_parent_names.remove(matched_a_parent_name)\n if a_parent_names:\n return False # A has a parent that does not match a parent of B\n\n # Check that A and B use the same inputs\n a_inputs = dict(node_a.inputs)\n for b_input_name in node_b.inputs:\n if b_input_name not in a_inputs:\n return False # B input not defined for A\n b_input = node_b.inputs[b_input_name]\n a_input = a_inputs[b_input_name]\n if not a_input.is_equal_to(b_input, self._matched_recipe_inputs, self._identical_nodes):\n return False # A and B have a non-matching input\n del a_inputs[b_input_name]\n if a_inputs:\n return False # A input not defined for B\n\n return True", "def same_as(self, space, in_space):\n if self.marks == space.marks and self.genus == space.genus:\n return True\n space = space.complementary_component(in_space)\n if self.marks == space.marks and self.genus == space.genus:\n return True\n return False", "def __eq__(self, other):\n return self._coords == other._coords", "def __eq__(self, other):\n return self._coords == other._coords", "def __ne__(self, other):\n return np.all(self.grid != other.grid) or np.all(self.pos != other.pos)", "def is_parent(self):\n if self.parent is not None:\n return False\n return True", "def is_parent_of(self):\n return self.hasLabel('parent_of')", "def __eq__(self, other):\n return self.key == other.key \\\n and self.get_inside() == other.get_inside() \\\n and self.get_outside() == other.get_outside()", "def __eq__(self, other):\n return (self.vertices == other.vertices and self.weight == other.weight)", "def same_as(self, other):\n return super().__eq__(other)", "def __eq__(self, other):\n\n return (self.nodes[0].id == other.nodes[0].id) & \\\n (self.nodes[1].id == other.nodes[1].id) & \\\n (self.name == other.name)", "def isSameKindAs(self, *args):\n return _osgAnimation.VertexInfluenceMap_isSameKindAs(self, *args)", "def __eq__(self, node):\n if node == None or self.element != node.element:\n return False\n return self.left == node.left and self.right == node.right", "def isSameKindAs(self, *args):\n return _osgAnimation.Skeleton_isSameKindAs(self, *args)", "def __eq__(self, other):\n return (isinstance(other, self.__class__) and\n self._input_dims == other._input_dims and\n self._output_dims == other._output_dims)", "def __eq__(self, other):\n x_eq = self.x == other.x\n y_eq = self.y == other.y\n return x_eq and y_eq", "def __eq__(self, other):\n x_eq = self.x == other.x\n y_eq = self.y == other.y\n return x_eq and y_eq", "def isSameKindAs(self, *args):\n return _osgAnimation.BasicAnimationManager_isSameKindAs(self, *args)", "def __eq__(self, other):\r\n return self.id_map == other.id_map and self.matrix == other.matrix\\\r\n and self.size == other.size", "def is_identical(self, other):\n return (self.compounddatatype == other.compounddatatype and\n self.min_row == other.min_row and\n self.max_row == other.max_row)", "def __eq__(self, other):\n if self is other:\n return True\n elif type(self) != type(other):\n return False\n else:\n # A node is considered equal if it has the exact same state as\n # another node\n if self.board_state == other.board_state:\n return True\n else:\n return False", "def isSameTree(self, p: TreeNode, q: TreeNode) -> None:\n # if both are null -> return true\n if not p and not q:\n return True\n\n # if 
only one of them is null -> False\n if not p or not q: \n return False\n \n # if value is different -> False\n if p.val != q.val:\n return False \n\n return self.isSameTree(p.right, q.right) and self.isSameTree(p.left, q.left)", "def orphaned(self):\n return (self.parent is None)", "def __eq__(self, other):\n\t\treturn self._coords == other._coords", "def __eq__(self, other):\r\n if isinstance(other, vec4):\r\n return self.x==other.x and self.y==other.y and self.z==other.z\r\n else:\r\n return 0", "def __eq__(self, other):\n if isinstance(self, other.__class__):\n return (self.r == other.r\n and self.g == other.g\n and self.b == other.b\n and self.a == other.a)\n\n return False", "def is_parent_of(cls, *args):\n return cls.graph_traversal(None, None, Bytecode()).is_parent_of(*args)", "def __eq__(self, other):\n if self.add_mode != other.add_mode:\n return False\n if not np.isclose(self.add_tol, other.add_tol):\n return False\n\n if not np.isclose(self.top, other.top):\n return False\n if not np.isclose(self.base, other.base):\n return False\n\n if (self.height != other.height):\n return False\n\n if not np.allclose(self.depths, other.depths):\n return False\n if not np.allclose(self.img, other.img):\n return False\n\n return True" ]
[ "0.70844996", "0.66548616", "0.66513604", "0.6646044", "0.64481336", "0.6433953", "0.63558435", "0.63483995", "0.6299958", "0.6295003", "0.62756413", "0.6267289", "0.6267289", "0.6267289", "0.6267289", "0.6267289", "0.626677", "0.62649465", "0.62575334", "0.6204619", "0.62005", "0.62005", "0.61947954", "0.6190282", "0.6183342", "0.61703104", "0.61490566", "0.6131132", "0.611621", "0.61115634", "0.6106623", "0.6093451", "0.60578465", "0.6057129", "0.602797", "0.60216", "0.6016046", "0.6015551", "0.6012849", "0.60112673", "0.59939504", "0.59878474", "0.5984988", "0.5980694", "0.5975614", "0.5972011", "0.59684896", "0.5965979", "0.5958836", "0.5958784", "0.5952794", "0.59478766", "0.5947628", "0.59432703", "0.594288", "0.5937181", "0.59364873", "0.5919003", "0.591765", "0.59102607", "0.59083694", "0.5905195", "0.5900531", "0.58992404", "0.58977354", "0.58961755", "0.5889765", "0.58871484", "0.5886326", "0.5882108", "0.5870024", "0.58678067", "0.586519", "0.58634293", "0.58521134", "0.58521134", "0.5843863", "0.58384496", "0.583629", "0.583352", "0.5825231", "0.5821512", "0.58207494", "0.58205813", "0.5820423", "0.5819601", "0.5817927", "0.5817454", "0.5817454", "0.5811446", "0.58111167", "0.5810411", "0.58029646", "0.58014405", "0.57955015", "0.5790797", "0.5787966", "0.5786807", "0.57794946", "0.5779073" ]
0.7603044
0
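The objective metadata in the record above marks each row for triplet-style training over (query, document, negatives). Below is a minimal sketch of an InfoNCE-style loss consuming one record's embeddings; the embedding dimension, temperature, and function name are illustrative assumptions rather than part of the dataset.

import torch
import torch.nn.functional as F

def record_infonce_loss(query_emb, doc_emb, negative_embs, temperature=0.05):
    # query_emb:     (d,)   embedding of the `query` field
    # doc_emb:       (d,)   embedding of the positive `document`
    # negative_embs: (n, d) embeddings of the `negatives` list
    candidates = torch.cat([doc_emb.unsqueeze(0), negative_embs], dim=0)  # (1 + n, d)
    sims = F.cosine_similarity(query_emb.unsqueeze(0), candidates) / temperature
    target = torch.zeros(1, dtype=torch.long)  # the positive document sits at index 0
    return F.cross_entropy(sims.unsqueeze(0), target)

# Toy usage with random embeddings (128 dims is an arbitrary choice).
q, d = torch.randn(128), torch.randn(128)
negs = torch.randn(8, 128)
print(record_infonce_loss(q, d, negs).item())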
True if current topo is compliant with target.
def can_communicate_with(self, target):
    if self == target:
        return True
    msg = 'You try to connect topologies belonging to'
    msg += ' two different mpi tasks. Set taskids properly or use'
    msg += ' InterBridge.'
    assert self.task_id() == target.task_id(), msg

    # Parent communicator
    # Todo : define some proper conditions for compatibility
    # between topo_from, topo_to and parent:
    # - same size
    # - same domain
    # - common processus ...
    # At the time we check that both topo have
    # the same comm_origin.
    return self.is_consistent_with(target)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def independent(self) -> bool:\n parent = self._parent()\n if parent is None:\n return True\n connections = parent._graph.connections\n path = self._path\n lp = len(path)\n for con in connections:\n if con[\"type\"] == \"connection\":\n if con[\"target\"][:lp] == path:\n return False\n return True", "def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def is_connected(self) -> bool:\n for node in self.nodes.values():\n if node.is_connected:\n return True\n return False", "def is_strongly_connected(self):\n if self.order()==1:\n return True\n\n try:\n return self._backend.is_strongly_connected()\n\n except AttributeError:\n return len(self.strongly_connected_components()) == 1", "def is_consistent_with(self, target):\n same_parent = self.parent() == target.parent()\n # Note FP. Is it really required to have the\n # same parent? Inclusion of all proc may be enough?\n return npw.equal(self.shape, target.shape).all() and same_parent", "def is_gentarget(self, target):\r\n raise NotImplementedError", "def is_connected(self):\n connected = False\n self.state = self.mesh.state()\n if self.state in (STATE_CHILD, STATE_ROUTER, STATE_LEADER, STATE_LEADER_SINGLE):\n connected = True\n return connected", "def is_adjacent(self, remote_host_name):\n # Check if a topology is defined, otherwise use fully connected\n if self.topology is None:\n return True\n\n if self.name in self.topology:\n if remote_host_name in self.topology[self.name]:\n return True\n else:\n return False\n else:\n logging.warning(\n \"Node {} is not in the specified topology and is therefore \"\n \"assumed to have no neighbors\".format(self.name)\n )\n return False", "def is_peered_with(self, other: SkupperSite) -> bool:\n if not self.cluster.peering:\n return False\n\n for c in self.cluster.peering.connections:\n if (\n isinstance(\n c,\n (\n ClusterPeeringConnectionClusterRequesterV1,\n ClusterPeeringConnectionClusterAccepterV1,\n ),\n )\n ) and c.cluster.name == other.cluster.name:\n return True\n return False", "def is_setup_connected(self):\n return bool(self.get_target_namespace())", "def is_connected(self):\n return True", "def is_central_server() -> bool:\n return hasattr(Config().algorithm,\n 'cross_silo') and Config().args.port is None", "def failover_target(self) -> bool:\n return pulumi.get(self, \"failover_target\")", "def is_active(self):\n if (\n '_transport' in self.__dict__ and\n self._transport.is_active()\n ):\n return True\n return False", "def is_connected(self) -> bool:", "def is_connected(self):\n return self._current_protocol is not None", "def at_target(self):\n return self.location == self.target_location", "def isConnectedTo(self, node):\n for arc in self._arcsFrom:\n if arc.getFinish() is node:\n return True\n return False", "def is_local_client(self):\n return self.msg.is_local_client", "def _quell_co2(self, flowable, context):\n if self._quell_biogenic is False:\n return False\n if flowable in self._bio_co2:\n if context.is_subcompartment(self._cm['from air']):\n return True\n if context.is_subcompartment(self._cm['Emissions']):\n return True\n return False", "def is_connected(self):\n if self.V < 1:\n raise ValueError(\"empty graph\")\n if self.V < 2:\n return True\n if self.E == 0:\n return False\n cc = self.cc()\n return int(cc.max() == 0)", "def has_target(self):\n return self._has_target", "def bfs_is_connected(self):\n q = Queue.Queue()\n origins = [self.vertices()[0]]\n traveled = set(origins)\n 
while origins:\n for o in origins:\n for child in self.out_vertices(o):\n if child not in traveled:\n q.put(child)\n traveled.add(child)\n\n origins = []\n while not q.empty():\n origins.append(q.get())\n if len(traveled) == self.order():\n return True\n return False", "def is_remote(client):\n if client == Client.ORIGIN:\n return is_origin_remote()\n elif client == Client.TARGET:\n return is_target_remote()\n elif client == Client.LOCAL:\n return False\n else:\n return False", "def is_connected(self):\n vs = self.vertices()\n visited = self.bfs(vs[0])\n return len(visited) == len(vs)", "def is_on(self) -> bool:\n val = bool(self._cluster_handler.cluster.get(self._zcl_attribute))\n return (not val) if self.inverted else val", "def is_connected(self) -> bool:\n return False if self._snitun is None else self._snitun.is_connected", "def is_target(self):\n\t\treturn self.window and self.window.target is self", "def is_community(self):\n context = aq_inner(self.context)\n for obj in aq_chain(context):\n if ICommunity.providedBy(obj):\n return True\n\n return False", "def is_active(self):\n group_names = self.get_var(\"group_names\", default=[])\n master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names\n return super(OvsVersion, self).is_active() and master_or_node", "def IsTopologicallyValid(*args):\n return _BRepAlgo.brepalgo_IsTopologicallyValid(*args)", "def target_connected(self):\n return self.connected() and bool(self._dll.JLINKARM_IsConnected())", "def is_client(self) -> bool:\n return self.zone.SharedRoomID and not self.zone.MasterMode", "def is_solved(self):\n return self._start == self._target", "def is_connected(self) -> bool:\n pass", "def brepalgo_IsTopologicallyValid(*args):\n return _BRepAlgo.brepalgo_IsTopologicallyValid(*args)", "def is_node_master(self) -> bool:\n self._assert_local_rank_set()\n return self.local_rank == 0", "def has_target(self):\n return self.target is not None", "def is_connected(self):\n return self.is_connected", "def isConnected(self):\n return self.__cooperationClient.hasConnections()", "def is_solved(self):\n return (self.from_grid == self.to_grid)", "def is_target_in(self, newtarget):\n from .utils.shape import HAS_SHAPELY\n # Test if shapely\n if not HAS_SHAPELY:\n print(\"WARNING: could not test if the target is in the image since you do not have SHAPELY\")\n return True\n # Test if WCS \n if not self.has_wcs():\n print(\"WARNING: because there is no wcs solution, \"+\\\n \"I can't test the inclusion of the new astrotarget\")\n return True\n \n return self.wcs.coordsAreInImage(*newtarget.radec)", "def can_prove(self, target):\n return self.prop == target.prop and set(self.hyps).issubset(set(target.hyps))", "def is_connected(self):\n return self.factory.is_connected", "def is_polycyclic(self):\n return self.is_solvable", "def is_distributed(self) -> bool:\n return self.size > 1", "def gethooverable(self):\n try:\n return self.hooverable\n except:\n return False", "def is_connected(self):\n return False", "def isConnected(self):\n return self.transport is not None and self.started", "def connected( self, u, v ):\n try:\n self.shortestPath(u, v)\n return True\n except nx.NetworkXNoPath:\n return False", "def is_solved(self):\n return self.to_grid == self.from_grid", "def local(self):\n return self.hostname == \"localhost\" and self.user is None and self.ssh_args is None", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def 
is_solved(self):\n return self.from_grid == self.to_grid", "def isConnected(self):\n return False", "def is_connected(self):\n if self.server: return True\n return False", "def is_connected_to(self, receiver: SkupperSite) -> bool:\n return receiver in self.connected_sites", "def isClientMultiplexingInterface(self):\n adaptation = self.getServerAdaptationFunction()\n if adaptation == None:\n return False # no adaptatation underneath\n else:\n clientcount = adaptation.getClientCount() # max. number of clients; None means unlimited\n return (clientcount != 1)", "def has_upstream_server(self) -> bool:\n return True if self.host is not None else False", "def is_connected(object_one, object_two):\n\n for vert_one in object_one.Vertexes:\n for vert_two in object_two.Vertexes:\n if (vert_one.X == vert_two.X) and (vert_one.y == vert_two.y):\n return True\n\n return False", "def is_connected(self):\n return self.connector and self.connector.state == \"connected\"", "def is_connected(self):\n return self.connected", "def solved(self):\n return GOAL_VEHICLE in self.vehicles", "def is_remote(self): # -> Any | bool:\n ...", "def is_transitive(self):\r\n return isinstance(self.pip_requirement.comes_from, InstallRequirement)", "def is_connected(self):\n return self.connector and self.connector.state == 'connected'", "def local_network_check():\n return (\n network.show_active() in LOCAL_BLOCKCHAIN_ENVINROMENTS\n or network.show_active() in FORKED_LOCAL_ENVIRONMENTS\n )", "def is_concrete(self):\r\n targets = list(self.resolve())\r\n return len(targets) == 1 and targets[0] == self", "def all_customers_in_destination(self):\n if len(self.customer_agents) > 0:\n return all([customer.is_in_destination() for customer in self.customer_agents.values()])\n else:\n return False", "def IsTarget(self, target_name):\n return target_name in self.GetTargets()", "def is_router(self):\n # @todo: Rewrite\n return self.address_set.count() > 1", "def local_is_up(self, target):\n try:\n check_address(target)\n except ValueError:\n self.logger.warning('Target must be a tuple (IP, port), where IP '\n 'is a string (i.e. \"192.168.0.1\") and port is '\n 'an integer (i.e. 40000). Alternatively '\n 'target can be a valid UNIX domain socket.')\n return False\n\n self.check_tunnels()\n return self.tunnel_is_up.get(target, True)", "def _is_sink() -> bool:\n\n def _is_inplace(n: Node):\n \"\"\"Get the inplace argument from ``torch.fx.Node``\n \"\"\"\n inplace = False\n if n.op == \"call_function\":\n inplace = n.kwargs.get(\"inplace\", False)\n elif n.op == \"call_module\":\n inplace = getattr(n.graph.owning_module.get_submodule(n.target), \"inplace\", False)\n return inplace\n\n def _is_shape_consistency(n: Node):\n \"\"\"Check if this node is shape-consistency node (i.e. 
``runtime_apply`` or ``runtime_apply_for_iterable_object``)\n \"\"\"\n return n.target in [runtime_apply, runtime_apply_for_iterable_object, runtime_comm_spec_apply]\n\n return not sum([v for _, v in deps.items()]) and not any(map(_is_inplace, n.users)) and not any(\n map(_is_shape_consistency, n.users))", "def is_connected(self):\n \n # All the vertices in the graph\n vertices = set(self.vertices())\n \n # Take a random vertex to start the search from\n vertex_search_start = self._edges.keys()[0]\n vertices_found = set(self.DFS(vertex_search_start))\n \n return vertices == vertices_found", "def isconnected(self) -> bool:", "def is_connected(self, node1, node2):\r\n\r\n return node1 in self.graph and node2 in self.graph[node1]", "def is_remote(self):\n return False", "def is_connected(A) -> bool:\r\n assert A is not None\r\n if is_multiobjects(A):\r\n return all(is_connected(adj) for adj in A)\r\n return sp.csgraph.connected_components(A, directed=is_directed(A), return_labels=False, connection='weak') == 1", "def check_connected(self):\n return\\\n (self.setup is not None) and\\\n (self.design is not None) and\\\n (self.project is not None) and\\\n (self.desktop is not None) and\\\n (self.app is not None)", "def isconnected(self) -> bool:\n ...", "def is_elected_leader(resource):\n if is_clustered():\n if not is_crm_leader(resource):\n log('Deferring action to CRM leader.', level=INFO)\n return False\n else:\n peers = peer_units()\n if peers and not oldest_peer(peers):\n log('Deferring action to oldest service unit.', level=INFO)\n return False\n return True", "def is_resolved(self) -> bool:\n return self._target_object is not None", "def is_connected(self):\n if self._connected:\n return True\n else:\n return perms_are_connected(self._g, self.degree())", "def is_on(self) -> bool:\n for bodyObjnam in self._bodies:\n body = self._controller.model[bodyObjnam]\n if (\n body[STATUS_ATTR] == \"ON\"\n and body[HEATER_ATTR] == self._poolObject.objnam\n and body[HTMODE_ATTR] != \"0\"\n ):\n return True\n return False", "def negotiation_should_advance(self):\n # Generally, this separates a bare TCP connect() from a True\n # RFC-compliant telnet client with responding IAC interpreter.\n server_do = sum(enabled for _, enabled in self.writer.remote_option.items())\n client_will = sum(enabled for _, enabled in self.writer.local_option.items())\n return bool(server_do or client_will)", "def is_cyclic(self):\n \n visited = set()\n path = []\n \n for node in self.node_set:\n if node not in visited:\n if self.is_cyclic_helper(node, visited, path) is True:\n return True \n \n visited.clear()\n path.clear()\n return False", "def _mapped_to_this_conductor(self, node_uuid, driver):\n try:\n ring = self.ring_manager[driver]\n except exception.DriverNotFound:\n return False\n\n return self.host in ring.get_hosts(node_uuid)", "def is_client(self):\n if not hasattr(self, '_is_client'):\n self._is_client = hasattr(self, 'client')\n return self._is_client", "def is_controlled(self):\n return False if self._remote_controller == \"\" else True", "def is_remote(self):\n if socket.gethostbyname(socket.gethostname()).startswith('10.7'):\n return False\n else:\n return True", "def has_undercoordinated_c(self) -> bool:\n if self._undercoordinated_carbon is not None:\n return self._undercoordinated_carbon\n\n self._has_undercoordinated_carbon()\n return self._undercoordinated_carbon", "def has(self, target):\r\n return target in self.by_target", "def is_connected(self):\n return \"_connection\" in self.__dict__", "def 
is_contractor(self):\n\n return self._is_contractor", "def is_connected(self, node1, node2):\r\n\r\n return node1 in self._graph and node2 in self._graph[node1]", "def is_transitive(self, strict=True):\n if self._is_transitive: # strict or not, if True then True\n return self._is_transitive\n if strict:\n if self._is_transitive is not None: # we only store strict=True\n return self._is_transitive\n\n ans = len(self.orbit(0)) == self.degree\n self._is_transitive = ans\n return ans\n\n got_orb = False\n for x in self.orbits():\n if len(x) > 1:\n if got_orb:\n return False\n got_orb = True\n return got_orb", "def is_connected(self):\n return self._proxy.get(\"is_connected\", \"filterwheel\")", "def has(self, target):\n return target in self.by_target", "def is_percolates(self):\n return self._uf.connected(self._top_idx, self._bottom_idx)" ]
[ "0.63338524", "0.61239785", "0.6074758", "0.5993989", "0.59502983", "0.5931595", "0.59294623", "0.59263664", "0.5918986", "0.58928543", "0.5868036", "0.5846863", "0.58454454", "0.5810941", "0.5808112", "0.58062047", "0.57958764", "0.5795005", "0.5779186", "0.5778966", "0.577796", "0.5774486", "0.57744783", "0.57719225", "0.5763049", "0.5757841", "0.5755968", "0.57556546", "0.57448035", "0.5744082", "0.57228935", "0.5721812", "0.57072926", "0.5705934", "0.5704837", "0.56967515", "0.5693194", "0.569262", "0.56918377", "0.56901205", "0.56570214", "0.5650002", "0.56488323", "0.5645852", "0.5635175", "0.5633015", "0.56253403", "0.5623047", "0.55986726", "0.55912477", "0.5587286", "0.55870193", "0.5577439", "0.5577439", "0.5577439", "0.55715376", "0.55704373", "0.55651885", "0.55591017", "0.5551313", "0.55512625", "0.55494046", "0.5544774", "0.5542474", "0.5537172", "0.5529706", "0.55288744", "0.55266255", "0.5512407", "0.5505126", "0.5504904", "0.5499701", "0.54913104", "0.5490446", "0.54897416", "0.5488859", "0.5488845", "0.5484058", "0.54820013", "0.5477921", "0.5475895", "0.5467425", "0.5466586", "0.5466348", "0.54500556", "0.54495263", "0.5437157", "0.5435795", "0.54315335", "0.5428716", "0.5414856", "0.54111767", "0.5408714", "0.54078907", "0.5407783", "0.5398886", "0.5395391", "0.53895235", "0.53868353", "0.5384116" ]
0.735502
0
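The negative_scores field lets a training pipeline keep only the most confusable negatives in each record. A small sketch follows; the top_k value and the assumption that higher scores mean higher similarity to the query are illustrative, not documented properties of the dataset.

def hardest_negatives(record, top_k=8):
    # Pair each negative with its score and keep the top_k highest-scoring ones.
    scored = sorted(
        zip(record["negative_scores"], record["negatives"]),
        key=lambda pair: float(pair[0]),
        reverse=True,
    )
    return [negative for _, negative in scored[:top_k]]

# Example usage: hard = hardest_negatives(ds[0], top_k=8)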
Collect global indices of local meshes on each process of topo
def gather_global_indices(topo, toslice=True, root=None, comm=None):
    if comm is None:
        comm = topo.parent()
    size = comm.size
    start = topo.mesh.start()
    end = topo.mesh.stop() - 1
    # communicator that owns the topology
    rank = comm.Get_rank()
    dimension = topo.domain.dimension
    iglob = npw.int_zeros((dimension * 2, size))
    iglob_res = npw.int_zeros((dimension * 2, size))
    iglob[0::2, rank] = start
    iglob[1::2, rank] = end
    # iglob is saved as a numpy array and then transform into
    # a dict of slices since mpi send operations are much
    # more efficient with numpy arrays.
    if root is None:
        comm.Allgather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT])
    else:
        comm.Gather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT],
                    root=root)

    if toslice:
        return utils.arrayToDict(iglob_res)
    else:
        return iglob_res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_local_indices(self, part, ctx):\n return self.map_to_global(\n F.arange(0, self.local_size(part), ctx=ctx), part\n )", "def _exchange_ghosts_local(self):\n for d in xrange(self._dim):\n self._exchange_ghosts_local_d(d)", "def getGlobalIndices( self, indices: list):\n result = indices.copy()\n for i,toAdd in enumerate(self._layout.starts):\n result[self._layout.dims_order[i]]=indices[i]+toAdd\n return result", "def gather_global_indices_overlap(topo=None, comm=None, dom=None,\n toslice=True, root=None):\n if topo is None:\n assert comm is not None and dom is not None\n size = comm.Get_size()\n rank = comm.Get_rank()\n dimension = dom.dimension\n iglob = npw.int_zeros((dimension * 2, size))\n iglob_res = npw.int_zeros((dimension * 2, size))\n iglob[1::2, rank] = -1\n if root is None:\n comm.Allgather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT])\n else:\n comm.Gather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT],\n root=root)\n if toslice:\n return utils.arrayToDict(iglob_res)\n else:\n return iglob_res\n\n else:\n return TopoTools.gather_global_indices(topo, toslice, root, comm)", "def gather_dof_coordinates(V: FunctionSpace, dofs: np.ndarray):\n x = V.tabulate_dof_coordinates()\n local_dofs = dofs[dofs < V.dofmap.index_map.size_local * V.dofmap.index_map_bs]\n coords = x[local_dofs]\n num_nodes = len(coords)\n glob_num_nodes = MPI.COMM_WORLD.allreduce(num_nodes, op=MPI.SUM)\n recvbuf = None\n if MPI.COMM_WORLD.rank == 0:\n recvbuf = np.zeros(3 * glob_num_nodes, dtype=np.float64)\n sendbuf = coords.reshape(-1)\n sendcounts = np.array(MPI.COMM_WORLD.gather(len(sendbuf), 0))\n MPI.COMM_WORLD.Gatherv(sendbuf, (recvbuf, sendcounts), root=0)\n glob_coords = MPI.COMM_WORLD.bcast(recvbuf, root=0).reshape((-1, 3))\n return glob_coords", "def global_index(self):\n raise NotImplementedError", "def global_to_local_map(self):\n Ngrid = 62\n map_file_to_domain = lambda (x,y,z): (x-1) + (y-1)*Ngrid + (z-1)*Ngrid*Ngrid\n\n if self._global_to_local_map is None: \n m = {}\n for k,v in self.ids_map.iteritems(): \n m[map_file_to_domain(k)] = v\n self._global_to_local_map = m\n return self._global_to_local_map", "def mpi_index_maps(loc_idx, shape, topology, coords, comm):\n\n nprocs = comm.Get_size()\n\n # Gather data structures from all ranks in order to produce the\n # relevant mappings.\n dat_len = np.zeros(topology, dtype=tuple)\n for j in range(nprocs):\n dat_len[coords[j]] = comm.bcast(shape, root=j)\n if any(k == 0 for k in dat_len[coords[j]]):\n dat_len[coords[j]] = as_tuple([0]*len(dat_len[coords[j]]))\n\n # If necessary, add the time index to the `topology` as this will\n # be required to correctly construct various maps.\n if len(np.amax(dat_len)) > len(topology):\n topology = as_list(topology)\n coords = [as_list(l) for l in coords]\n for _ in range(len(np.amax(dat_len)) - len(topology)):\n topology.insert(0, 1)\n for e in coords:\n e.insert(0, 0)\n topology = as_tuple(topology)\n coords = as_tuple([as_tuple(i) for i in coords])\n dat_len = dat_len.reshape(topology)\n dat_len_cum = distributed_data_size(dat_len, coords, topology)\n\n # This 'transform' will be required to produce the required maps\n transform = []\n for i in as_tuple(loc_idx):\n if isinstance(i, slice):\n if i.step is not None:\n transform.append(slice(None, None, np.sign(i.step)))\n else:\n transform.append(slice(None, None, None))\n else:\n transform.append(slice(0, 1, None))\n transform = as_tuple(transform)\n\n global_size = dat_len_cum[coords[-1]]\n\n indices = np.zeros(global_size, dtype=tuple)\n global_si = 
np.zeros(global_size, dtype=tuple)\n it = np.nditer(indices, flags=['refs_ok', 'multi_index'])\n while not it.finished:\n index = it.multi_index\n indices[index] = index\n it.iternext()\n global_si[:] = indices[transform]\n\n # Create the 'rank' slices\n rank_slice = []\n for j in coords:\n this_rank = []\n for k in dat_len[j]:\n this_rank.append(slice(0, k, 1))\n rank_slice.append(this_rank)\n # Normalize the slices:\n n_rank_slice = []\n for i in range(len(rank_slice)):\n my_coords = coords[i]\n if any([j.stop == j.start for j in rank_slice[i]]):\n n_rank_slice.append(as_tuple([None]*len(rank_slice[i])))\n continue\n if i == 0:\n n_rank_slice.append(as_tuple(rank_slice[i]))\n continue\n left_neighbours = []\n for j in range(len(my_coords)):\n left_coord = list(my_coords)\n left_coord[j] -= 1\n left_neighbours.append(as_tuple(left_coord))\n left_neighbours = as_tuple(left_neighbours)\n n_slice = []\n for j in range(len(my_coords)):\n if left_neighbours[j][j] < 0:\n start = 0\n stop = dat_len_cum[my_coords][j]\n else:\n start = dat_len_cum[left_neighbours[j]][j]\n stop = dat_len_cum[my_coords][j]\n n_slice.append(slice(start, stop, 1))\n n_rank_slice.append(as_tuple(n_slice))\n n_rank_slice = as_tuple(n_rank_slice)\n\n # Now fill each elements owner:\n owners = np.zeros(global_size, dtype=np.int32)\n send = np.zeros(global_size, dtype=np.int32)\n for i in range(len(n_rank_slice)):\n if any([j is None for j in n_rank_slice[i]]):\n continue\n else:\n owners[n_rank_slice[i]] = i\n send[:] = owners[transform]\n\n # Construct local_si\n local_si = np.zeros(global_size, dtype=tuple)\n it = np.nditer(local_si, flags=['refs_ok', 'multi_index'])\n while not it.finished:\n index = it.multi_index\n owner = owners[index]\n my_slice = n_rank_slice[owner]\n rnorm_index = []\n for j, k in zip(my_slice, index):\n rnorm_index.append(k-j.start)\n local_si[index] = as_tuple(rnorm_index)\n it.iternext()\n return owners, send, global_si, local_si", "def write_global_local_maps(dest,global_local,local_global):", "def get_locations(self):\n self.locations = {} # reset dictionary\n for node in self.extant_p:\n if node.host not in self.locations:\n self.locations.update({node.host: []})\n self.locations[node.host].append(node)", "def get_main_branch_indices(self):\n\n assert self.halt is not None\n prog_main_index = self.halt_index\n prog_main_indices = self.halt.prop(\n 'progenitor.main.indices', self.halt_index)\n self.main_branch_indices = prog_main_indices\n return prog_main_indices", "def get_all_local_clustering_coef(g):\n local_cc = {}\n\n for n in nx.nodes(g):\n local_cc[n] = get_local_clustering_coef(g, n)\n\n return local_cc", "def get_prescribed_indexes(self):\n global_prescribed = []\n for node in self.preprocessor.nodes.values():\n if node.there_are_prescribed_dofs:\n starting_position = node.global_index * DOF_PER_NODE_STRUCTURAL\n dofs = np.array(node.get_prescribed_dofs_bc_indexes()) + starting_position\n global_prescribed.extend(dofs)\n return global_prescribed", "def agent_locs_idx(self):\n return tuple(self.agent_locs.T)", "def atlas_clusters():\n pass", "def get_all_master_idx_paths(self):\n paths = utilities.get_all_master_index_paths(rootdir=constants.flow_data_dir)\n return paths", "def _generate_vertexes(self):\n # generate list of sets for each vms\n self._graph = [set() for _ in range(self._vm_count)]", "def set_hypercubes_parents_indices(self):\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 
1])\n for indices in list(itertools.product(*coordinates)):\n hypercube.parent_hypercubes_indices.append(tuple(indices))", "def generate_all_locations(grid, shape):", "def all_env_ids(self) -> np.ndarray:", "def _setup_global_base(self):\n self._setup_facet_orientations()\n\n self._init_econn()\n\n n_dof = 0\n all_dofs = {}\n remaps = {}\n for ig, ap in self.aps.iteritems():\n ii = self.region.get_cells(ig)\n nd = nm.prod(ap.econn.shape)\n\n group = self.domain.groups[ig]\n remaps[ig] = prepare_remap(ii, group.shape.n_el)\n\n aux = nm.arange(n_dof, n_dof + nd, dtype=nm.int32)\n aux.shape = ap.econn.shape\n\n ap.econn[:] = aux\n all_dofs[ig] = aux\n\n n_dof += nd\n\n self.n_nod = n_dof\n\n self.n_bubble_dof = n_dof\n self.bubble_dofs = all_dofs\n self.bubble_remaps = remaps\n\n self.n_vertex_dof = self.n_edge_dof = self.n_face_dof = 0\n\n self._setup_esurface()", "def update_global_identifiers(self, universe_test):\n self.cellNum += 1\n self.surfaceNum += 1\n self.materialNum += 1\n if universe_test:\n self.universe += 1", "def get_local_ids(self,\n np.ndarray[uint32, mode='c', ndim=1] entities not None,\n int32 dent,\n np.ndarray[uint32, mode='c', ndim=1] incident not None,\n np.ndarray[uint32, mode='c', ndim=1] offsets not None,\n int32 dim):\n cdef Indices[1] _entities, _local_ids\n cdef MeshConnectivity _incident[1]\n cdef np.ndarray[uint32, mode='c', ndim=1] out\n\n if not entities.shape[0] > 0:\n return np.empty(0, dtype=np.uint32)\n\n _entities.num = entities.shape[0]\n _entities.indices = &entities[0]\n\n _incident.num = _entities.num\n _incident.n_incident = incident.shape[0]\n _incident.indices = &incident[0]\n _incident.offsets = &offsets[0]\n\n out = np.empty(_incident.n_incident, dtype=np.uint32)\n _local_ids.num = _incident.n_incident\n _local_ids.indices = &out[0]\n mesh_get_local_ids(self.mesh, _local_ids, _entities, dent, _incident, dim)\n\n return out", "def nsi_internal_local_clustering(self, node_list):\n return self.nsi_cross_local_clustering(node_list, node_list)", "def _exchange_ghosts_mpi(self):\n for d in xrange(self._dim):\n if d in self._cutdir_list:\n self._exchange_ghosts_mpi_d(d)\n else:\n self._exchange_ghosts_local_d(d)", "def cal_globalIndexH(self):\n h_local = self.cal_localIndexH()\n h_global = np.sum(h_local)\n\n return h_global", "def internal_global_clustering(self, node_list):\n clustering = self.local_clustering()\n internal_clustering = clustering[node_list].mean()\n return internal_clustering", "def map_to_local(self, idxs):\n return F.zerocopy_from_dgl_ndarray(\n _CAPI_DGLNDArrayPartitionMapToLocal(\n self._partition, F.zerocopy_to_dgl_ndarray(idxs)\n )\n )", "def _prog_field_indices(self):\n\n if self._pfi is not None:\n return self._pfi\n\n self.arbor._grow_tree(self)\n self._pfi = np.array([node.tree_id for node in self._prog_nodes])\n return self._pfi", "def iter_node_map(self):\n return self.d_inv.keys()", "def indices(online: bool = False) -> dict:\n return _get_indices(online)", "def get_local_neighbourhood_composition(self):\n neighbourhood_students = []\n\n #print(\"id, students\",self.unique_id, len(self.neighbourhood_students_indexes))\n neighbourhood_students = self.model.get_households_from_index(self.neighbourhood_students_indexes)\n local_neighbourhood_composition = get_counts_util(neighbourhood_students, self.model)\n #print(\"step \",self.model.schedule.steps,\" neighb students \",len(self.neighbourhood_students))\n\n return (local_neighbourhood_composition)", "def assign_dofs(self):\n i = 0\n for m in self._meshes:\n i = 
m.assign_dofs(start_i=i)\n self._ndofs = i\n return i", "def mesh_scoping(self):\n return self._mesh_scoping", "def mesh_scoping(self):\n return self._mesh_scoping", "def mesh_scoping(self):\n return self._mesh_scoping", "def get_local_params(self, par_global):\n return [\n par_global[a] if a is not None else b\n for a, b in zip(self._p_global_indices, self.p_local)\n ]", "def BuildLocalWorkList(M, C, skip_cache=False):\n\n # Evenly divide up the work among processes\n W = []\n for c in C:\n for m in M:\n if skip_cache:\n # if we want to skip we have to check that it is complete\n fname = os.path.join(c.output_path, \"%s_%s.nc\" % (c.name, m.name))\n complete = False\n if os.path.isfile(fname):\n try:\n with Dataset(fname) as dset:\n if \"complete\" in dset.ncattrs():\n if dset.complete:\n complete = True\n except:\n pass\n if not complete:\n os.system(\"rm -f %s\" % fname)\n W.append([m, c])\n else:\n W.append([m, c])\n\n wpp = float(len(W)) / size\n begin = int(round(rank * wpp))\n end = int(round((rank + 1) * wpp))\n localW = W[begin:end]\n\n # Determine who is the master of each confrontation\n for c in C:\n sendbuf = np.zeros(size, dtype=\"int\")\n for w in localW:\n if c is w[1]:\n sendbuf[rank] += 1\n recvbuf = None\n if rank == 0:\n recvbuf = np.empty([size, sendbuf.size], dtype=\"int\")\n comm.Gather(sendbuf, recvbuf, root=0)\n if rank == 0:\n numc = recvbuf.sum(axis=1)\n else:\n numc = np.empty(size, dtype=\"int\")\n comm.Bcast(numc, root=0)\n if rank == numc.argmax():\n c.master = True\n else:\n c.master = False\n\n return localW", "def index(self):\n # Check is multiple orders were given\n try:\n orders = list(iter(self.orders))\n except TypeError:\n orders = [self.orders]\n sites = self._epistasismap.sites\n x = [i for i in range(1, len(sites)) if len(sites[i]) in orders]\n # Add the zeroth element if included\n if 0 in orders:\n x = [0] + x\n return np.array(x)", "def getGlobalIdxVals( self, i : int ):\n return range(self._layout.starts[i],self._layout.ends[i])", "def get_local_neighbourhood_composition(self):\n\n local_neighbourhood_composition = get_counts_util(self.neighbourhood_students, self.model)\n #print(\"step \",self.model.schedule.steps,\" neighb students \",len(self.neighbourhood_students))\n\n return (local_neighbourhood_composition)", "def _init_symbol_tracker(self):\n # Initialize with an empty set\n atoms_indx = {symb: set([]) for symb in self.symbols}\n\n # Populate the sets\n for atom in self.atoms:\n symb = atom.symbol\n atoms_indx[symb].add(atom.index)\n return atoms_indx", "def map_graph_id(self):\n for graph_id in self.NX_GRAPHS:\n graph = self.NX_GRAPHS[graph_id]\n flag = 0\n try:\n for node_idx in range(len(graph.nodes)):\n if np.sum(\n np.array(graph.nodes[node_idx]['h']) == \n np.array(self.dgl_graph.nodes[node_idx].data['h'][0])) != 128:\n flag = -1\n break\n if flag == -1:\n continue\n else:\n self.graph_id = graph_id\n break\n except:\n pass", "def compute_node_positions(self):\n pass", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def get_grid_index(init_grid_size, map_size, device):\n H_init, W_init = init_grid_size\n H, W = map_size\n idx = torch.arange(H * W, device=device).reshape(1, 1, H, W)\n idx = F.interpolate(idx.float(), [H_init, W_init], mode='nearest').long()\n return idx.flatten()", "def get_global_loads_for_static_analysis(self):\n try:\n\n cols = 1\n total_dof = DOF_PER_NODE_STRUCTURAL * len(self.preprocessor.nodes)\n\n _frequencies = 
np.array([0.], dtype=float)\n loads = np.zeros((total_dof, cols), dtype=complex)\n \n # elementary loads - element integration\n for element in self.preprocessor.structural_elements.values():\n position = element.global_dof\n # self-weight loads\n if self.preprocessor.project.weight_load:\n loads[position] += element.get_self_weighted_load(self.preprocessor.gravity_vector)\n # stress stiffening loads\n if self.preprocessor.project.internal_pressure_load:\n loads[position] += element.force_vector_stress_stiffening()\n # distributed loads\n if self.preprocessor.project.element_distributed_load:\n loads[position] += element.get_distributed_load()\n \n if self.preprocessor.project.external_nodal_loads:\n # nodal loads\n for node in self.preprocessor.nodes.values():\n if node.there_are_nodal_loads:\n position = node.global_dof\n if node.loaded_table_for_nodal_loads:\n temp_loads = [_frequencies if bc is None else bc for bc in node.nodal_loads]\n else:\n temp_loads = [_frequencies if bc is None else np.ones_like(_frequencies)*bc for bc in node.nodal_loads]\n loads[position, :] += temp_loads\n\n except Exception as _error_log:\n print(str(_error_log))\n \n return loads[self.unprescribed_indexes,:]", "def map_to_global(self, idxs, part_id):\n return F.zerocopy_from_dgl_ndarray(\n _CAPI_DGLNDArrayPartitionMapToGlobal(\n self._partition, F.zerocopy_to_dgl_ndarray(idxs), part_id\n )\n )", "def _load_cluster(self):", "def _initial_clusters(self):\n clusters = []\n for i in range(self.point_count):\n clusters.append(self._create_cluster_from_index(i))\n return clusters", "def ids(initial_state, dimension=3):\n\n\tglobal explored_nodes\n\n\tfor max_depth in range(sys.maxsize):\n\t\tsolution = search(\n\t\t\tinitial_state, \n\t\t\tFrontier(LifoQueue, update_filter=lambda x: False if x.depth > max_depth else x), \n\t\t\tdimension, \n\t\t\tcheck_explored=False\n\t\t)\n\n\t\tif(solution):\n\t\t\tsolution.explored = explored_nodes\n\t\t\treturn solution", "def indices(self):\n return self._kbounded_partitions", "def reference_nodes_idx(self) -> Dict[str, torch.Tensor]:\n return self.node_idx_references", "def _form_computation_graph(self, idx):\n _list, _set = list, set\n if type(idx) is int:\n node_layers = [np.array([idx], dtype=np.int64)]\n elif type(idx) is list:\n node_layers = [np.array(idx, dtype=np.int64)]\n\n for _ in range(self.n_layers):\n prev = node_layers[-1]\n arr = [node for node in prev]\n arr.extend([e[0] for node in arr for e in self.nbrs_t[node]])\n arr = np.array(_list(_set(arr)), dtype=np.int64)\n node_layers.append(arr)\n node_layers.reverse()\n\n mappings = [{j: i for (i, j) in enumerate(arr)} for arr in node_layers]\n\n return node_layers, mappings", "def localInitialize(self):\n SVL = self.readFromROM()\n self._generateQuadsAndPolys(SVL)\n #print out the setup for each variable.\n msg = self.printTag+' INTERPOLATION INFO:\\n'\n msg += ' Variable | Distribution | Quadrature | Polynomials\\n'\n for v in self.quadDict:\n msg += ' '+' | '.join([v,self.distDict[v].type,self.quadDict[v].type,self.polyDict[v].type])+'\\n'\n msg += ' Polynomial Set Degree: '+str(self.maxPolyOrder)+'\\n'\n msg += ' Polynomial Set Type : '+str(SVL.indexSetType)+'\\n'\n self.raiseADebug(msg)\n\n self.raiseADebug('Starting index set generation...')\n self.indexSet = IndexSets.factory.returnInstance(SVL.indexSetType)\n self.indexSet.initialize(self.features, self.importanceDict, self.maxPolyOrder)\n if self.indexSet.type=='Custom':\n self.indexSet.setPoints(SVL.indexSetVals)\n\n self.sparseGrid = 
Quadratures.factory.returnInstance(self.sparseGridType)\n self.raiseADebug(f'Starting {self.sparseGridType} sparse grid generation...')\n self.sparseGrid.initialize(self.features, self.indexSet, self.dists, self.quadDict, self.jobHandler)\n\n if self.writeOut is not None:\n msg = self.sparseGrid.__csv__()\n outFile = open(self.writeOut,'w')\n outFile.writelines(msg)\n outFile.close()\n\n self.limit=len(self.sparseGrid)\n self.raiseADebug(f'Size of Sparse Grid: {self.limit}')\n self.raiseADebug('Finished sampler generation.')\n\n self.raiseADebug('indexset:',self.indexSet)\n for SVL in self.ROM.supervisedContainer:\n SVL.initialize({'SG': self.sparseGrid,\n 'dists': self.dists,\n 'quads': self.quadDict,\n 'polys': self.polyDict,\n 'iSet': self.indexSet})", "def update_global_targets(all_targets, tile_targets):\n\n # loop over each target and check whether it hass been assigned to a fiber.\n for i_target in range(tile_targets.n):\n if(tile_targets.fiber[i_target]!=-1):\n loc = np.where(all_targets.id == tile_targets.id[i_target])\n if(np.size(loc)!=0):\n loc = loc[0]\n all_targets.n_observed[loc] = all_targets.n_observed[loc] + 1\n # TOWRITE: still have to make the update to ASSIGNEDTYPE and ASSIGNEDZ \n else:\n raise ValueError('The target id %d in tile was not found in general target list'%(tile_targets.id[i_target]))\n return", "def get_local_bov(bov, grid: Grid, key_points, descriptors, n_visuals):\n n_grid_cells = len(grid)\n local_bov_features = np.array([np.zeros(n_visuals) for i in range(n_grid_cells)])\n for i in range(len(key_points)):\n visual_id = bov.predict(descriptors[i].reshape(1, descriptors[i].shape[0]))\n cell_id = grid.get_cell_id(key_points[i])\n local_bov_features[cell_id][visual_id] += 1\n return local_bov_features.flatten()", "def initCitys(self):\n self.cities = []\n for vertex in self.metaGraph:\n self.cities.append(vertex)", "def _unique_surface_indices(self, surf_mesh):\n flattened = []\n for tup in surf_mesh:\n flattened += list(tup)\n return list(set(flattened))", "def init_processes(rank, size, backend='gloo'):\n os.environ['MASTER_ADDR'] = '12.12.10.13'\n os.environ['MASTER_PORT'] = '29500'\n dist.init_process_group(backend, rank=rank, world_size=size)", "def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes", "def cfdProcessNodeTopology(self):\r\n self.nodeElements = self.cfdInvertConnectivity(self.elementNodes)\r\n self.nodeFaces = self.cfdInvertConnectivity(self.faceNodes)", "def all_sampled_nodes_indexes(self) -> torch.LongTensor:\n all_sampled_nodes_indexes: _typing.Any = self.__all_sampled_nodes_indexes\n return all_sampled_nodes_indexes", "def reference_nodes_graph_idx(self) -> Dict[str, torch.Tensor]:\n return self.node_graph_idx_reference", "def discover_map(self):\n frontier = Queue()\n cleared = {self.position}\n for pos in self._check_neighbors():\n frontier.put(pos)\n self.add_node(pos, self.position)\n while not frontier.empty():\n next = frontier.get()\n if next not in cleared:\n self.move_to(next)\n for pos in self._check_neighbors():\n self.add_node(pos, self.position)\n frontier.put(pos)\n cleared.add(self.position)\n\n return tuple(self.grid[2])[0]", "def _get_level_map(self):\n \n # get the initial group mapping across sub-domains just based on\n # particle IDs\n groups_map = self._get_gid_map()\n\n sc = self.sc\n\n sqc = SQLContext(sc)\n\n if self.DEBUG: \n print 'spark_fof DEBUG: groups in initial mapping = %d'%groups_map.cache().count()\n\n \n # create the spark GraphFrame 
with group IDs as nodes and group connections as edges\n v_df = sqc.createDataFrame(groups_map.flatMap(lambda x: x)\n .distinct()\n .map(lambda v: Row(id=int(v))))\n e_df = sqc.createDataFrame(groups_map.map(lambda (s,d): Row(src=int(s), dst=int(d))))\n\n # persist the graph, allowing it to spill to disk if necessary\n g_graph = graphframes.GraphFrame(v_df, e_df).persist(StorageLevel.MEMORY_AND_DISK_SER)\n \n # generate mapping\n def make_mapping(items): \n \"\"\"Helper function to generate mappings to lowest node ID\"\"\"\n compid, nodes = items\n nodes = list(nodes)\n base_node = min(nodes)\n return [(node,base_node) for node in nodes if node != base_node]\n \n nPartitions = sc.defaultParallelism*5\n\n timein = time.time()\n group_mapping = (g_graph.connectedComponents()\n .rdd.map(lambda row: (row.component, row.id))\n .groupByKey(nPartitions)\n .filter(lambda (k,v): len(v.data)>1)\n .flatMap(make_mapping)).cache()\n \n if self.DEBUG:\n print 'spark_fof DEBUG: groups in final mapping = %d'%len(mapping)\n\n print 'spark_fof: domain group mapping build took %f seconds'%(time.time()-timein)\n self.group_mapping = group_mapping\n\n return group_mapping", "def init_processes(fn, local_rank, backend='nccl'):\n dist.init_process_group(backend)\n fn(dist.get_rank(), dist.get_world_size(), local_rank)", "def topology_complete(self):\n\t\tfor i in range(len(self.sites) - 1):\n\t\t\tfor j in range(i + 1, len(self.sites)):\n\t\t\t\tself.sites[i].neighbors.append(self.sites[j])\n\t\t\t\tself.sites[j].neighbors.append(self.sites[i])", "def global_level(\n adata: AnnData,\n use_label: str = \"louvain\",\n use_rep: str = \"X_pca\",\n n_dims: int = 40,\n list_clusters: list = [],\n return_graph: bool = False,\n w: float = None,\n verbose: bool = True,\n copy: bool = False,\n) -> Optional[AnnData]:\n\n assert w <= 1, \"w should be in range 0 to 1\"\n # Get global graph\n G = _read_graph(adata, \"global_graph\")\n # Convert to directed graph\n H = G.to_directed()\n\n cat_inds = adata.uns[use_label + \"_index_dict\"]\n inds_cat = {v: k for (k, v) in cat_inds.items()}\n\n # Query cluster\n if type(list_clusters[0]) == str:\n list_clusters = [cat_inds[label] for label in list_clusters]\n query_nodes = list_clusters\n\n query_nodes = ordering_nodes(query_nodes, use_label, adata)\n if verbose:\n print(\n \"Start to construct the trajectory: \"\n + \" -> \".join(np.array(query_nodes).astype(str))\n )\n\n query_dict = {}\n order_dict = {}\n\n for i in query_nodes:\n order = 0\n for j in adata.obs[adata.obs[use_label] == str(inds_cat[i])][\n \"sub_cluster_labels\"\n ].unique():\n query_dict[int(j)] = int(i)\n order_dict[int(j)] = int(order)\n\n order += 1\n dm_list = []\n sdm_list = []\n order_big_dict = {}\n edge_list = []\n\n for i, j in enumerate(query_nodes):\n order_big_dict[j] = int(i)\n if i == len(query_nodes) - 1:\n break\n for j in adata.uns[\"split_node\"][query_nodes[i]]:\n for k in adata.uns[\"split_node\"][query_nodes[i + 1]]:\n edge_list.append((int(j), int(k)))\n\n # Calculate DPT distance matrix\n dm_list.append(\n ge_distance_matrix(\n adata,\n inds_cat[query_nodes[i]],\n inds_cat[query_nodes[i + 1]],\n use_label=use_label,\n use_rep=use_rep,\n n_dims=n_dims,\n )\n )\n # Calculate Spatial distance matrix\n sdm_list.append(\n spatial_distance_matrix(\n adata,\n inds_cat[query_nodes[i]],\n inds_cat[query_nodes[i + 1]],\n use_label=use_label,\n )\n )\n\n # Get centroid dictionary\n centroid_dict = adata.uns[\"centroid_dict\"]\n centroid_dict = {int(key): centroid_dict[key] for key in 
centroid_dict}\n\n H_sub = H.edge_subgraph(edge_list)\n if not nx.is_connected(H_sub.to_undirected()):\n raise ValueError(\n \"The chosen clusters are not available to construct the spatial trajectory! Please choose other path.\"\n )\n H_sub = nx.DiGraph(H_sub)\n prepare_root = []\n for node in adata.uns[\"split_node\"][query_nodes[0]]:\n H_sub.add_edge(9999, int(node))\n prepare_root.append(centroid_dict[int(node)])\n\n prepare_root = np.array(prepare_root)\n centroide = (\n sum(prepare_root[:, 0]) / len(prepare_root[:, 0]),\n sum(prepare_root[:, 1]) / len(prepare_root[:, 1]),\n )\n\n # Get centroid dictionary\n centroid_dict = adata.uns[\"centroid_dict\"]\n centroid_dict = {int(key): centroid_dict[key] for key in centroid_dict}\n\n H_sub = H.edge_subgraph(edge_list)\n H_sub = nx.DiGraph(H_sub)\n\n prepare_root = []\n for node in adata.uns[\"split_node\"][query_nodes[0]]:\n H_sub.add_edge(9999, int(node))\n prepare_root.append(centroid_dict[int(node)])\n\n prepare_root = np.array(prepare_root)\n centroide = (\n sum(prepare_root[:, 0]) / len(prepare_root[:, 0]),\n sum(prepare_root[:, 1]) / len(prepare_root[:, 1]),\n )\n centroid_dict[9999] = np.array(centroide)\n\n labels = nx.get_edge_attributes(H_sub, \"weight\")\n\n for edge, _ in labels.items():\n\n dm = dm_list[order_big_dict[query_dict[edge[0]]]]\n sdm = sdm_list[order_big_dict[query_dict[edge[0]]]]\n\n weight = dm[order_dict[edge[0]], order_dict[edge[1]]] * w + sdm[\n order_dict[edge[0]], order_dict[edge[1]]\n ] * (1 - w)\n H_sub[edge[0]][edge[1]][\"weight\"] = weight\n # tmp = H_sub\n\n H_sub = nx.algorithms.tree.minimum_spanning_arborescence(H_sub)\n H_nodes = list(range(len(H_sub.nodes)))\n\n node_convert = {}\n for pair in zip(list(H_sub.nodes), H_nodes):\n node_convert[pair[1]] = pair[0]\n\n adata.uns[\"PTS_graph\"] = {}\n adata.uns[\"PTS_graph\"][\"graph\"] = nx.to_scipy_sparse_matrix(H_sub)\n adata.uns[\"PTS_graph\"][\"node_dict\"] = node_convert\n\n if return_graph:\n return H_sub", "def local_to_global(local_index):\n return global_index.value.get(tokens.value[local_index], -1)", "def global_index( self , active_index = None, ijk = None):\n return self.__global_index( active_index = active_index , ijk = ijk )", "def __global_index( self , active_index = None , global_index = None , ijk = None):\n\n set_count = 0\n if not active_index is None:\n set_count += 1\n\n if not global_index is None:\n set_count += 1\n\n if ijk:\n set_count += 1\n \n if not set_count == 1:\n raise ValueError(\"Exactly one of the kewyord arguments active_index, global_index or ijk must be set\")\n \n if not active_index is None:\n global_index = self._get_global_index1A( active_index )\n elif ijk:\n nx = self.getNX()\n ny = self.getNY()\n nz = self.getNZ()\n \n i,j,k = ijk\n\n if not 0 <= i < nx:\n raise IndexError(\"Invalid value i:%d Range: [%d,%d)\" % (i , 0 , nx)) \n\n if not 0 <= j < ny:\n raise IndexError(\"Invalid value j:%d Range: [%d,%d)\" % (j , 0 , ny)) \n \n if not 0 <= k < nz:\n raise IndexError(\"Invalid value k:%d Range: [%d,%d)\" % (k , 0 , nz)) \n\n global_index = self._get_global_index3( i,j,k)\n else:\n if not 0 <= global_index < self.getGlobalSize():\n raise IndexError(\"Invalid value global_index:%d Range: [%d,%d)\" % (global_index , 0 , self.getGlobalSize())) \n return global_index", "def get_global_index( self , ijk = None , active_index = None):\n gi = self.__global_index( active_index = active_index , ijk = ijk)\n return gi", "def local_to_global(npboxes: np.ndarray, window: Box):\n xmin = window.xmin\n ymin = window.ymin\n 
return npboxes + np.array([[ymin, xmin, ymin, xmin]])", "def build_amg_index_sets(L_sizes):\n neqns = L_sizes[0][0]\n velocityDOF=[]\n for start in range(1,3):\n velocityDOF.append(np.arange(start=start,\n stop=1+neqns,\n step=3,\n dtype='i'))\n velocityDOF_full=np.vstack(velocityDOF).transpose().flatten()\n velocity_u_DOF = []\n velocity_u_DOF.append(np.arange(start=0,\n stop=2*neqns//3,\n step=2,\n dtype='i'))\n velocity_u_DOF_full = np.vstack(velocity_u_DOF).transpose().flatten()\n velocity_v_DOF = []\n velocity_v_DOF.append(np.arange(start=1,\n stop=1+2*neqns//3,\n step=2,\n dtype='i'))\n velocity_v_DOF_full = np.vstack(velocity_v_DOF).transpose().flatten()\n isvelocity = PETSc.IS()\n isvelocity.createGeneral(velocityDOF_full)\n isu = PETSc.IS()\n isu.createGeneral(velocity_u_DOF_full)\n isv = PETSc.IS()\n isv.createGeneral(velocity_v_DOF_full)\n return [isvelocity, isu, isv]", "def local_nonant_cache(self):\n if not self._ran:\n raise RuntimeError(\"Need to call WheelSpinner.run() before querying solutions.\")\n local_xhats = dict()\n for k,s in self.spcomm.opt.local_scenarios.items():\n for node in s._mpisppy_node_list:\n if node.name not in local_xhats:\n local_xhats[node.name] = [\n value(var) for var in node.nonant_vardata_list]\n return local_xhats", "def _local_search(self):\n\n # Set occupancies of rigid cluster and its direct neighboring atoms to\n # 1 for clash detection and MIQP\n selection = self.ligand._selection\n self.ligand._active[selection] = True\n center = self.ligand.coor[self._cluster].mean(axis=0)\n new_coor_set = []\n new_bs = []\n for coor, b in zip(self._coor_set, self._bs):\n self.ligand._coor[selection] = coor\n self.ligand._b[selection] = b\n rotator = GlobalRotator(self.ligand, center=center)\n for rotmat in RotationSets.get_local_set():\n rotator(rotmat)\n translator = Translator(self.ligand)\n iterator = itertools.product(\n *[np.arange(*trans) for trans in self._trans_box]\n )\n for translation in iterator:\n translator(translation)\n new_coor = self.ligand.coor\n if self.options.remove_conformers_below_cutoff:\n values = self.xmap.interpolate(new_coor)\n mask = self.ligand.e != \"H\"\n if np.min(values[mask]) < self.options.density_cutoff:\n continue\n if self.options.external_clash:\n if not self._cd() and not self.ligand.clashes():\n if new_coor_set:\n delta = np.array(new_coor_set) - np.array(new_coor)\n if (\n np.sqrt(\n min(np.square((delta)).sum(axis=2).sum(axis=1))\n )\n >= self.options.rmsd_cutoff\n ):\n new_coor_set.append(new_coor)\n new_bs.append(b)\n else:\n new_coor_set.append(new_coor)\n new_bs.append(b)\n elif not self.ligand.clashes():\n if new_coor_set:\n delta = np.array(new_coor_set) - np.array(new_coor)\n if (\n np.sqrt(min(np.square((delta)).sum(axis=2).sum(axis=1)))\n >= self.options.rmsd_cutoff\n ):\n new_coor_set.append(new_coor)\n new_bs.append(b)\n else:\n new_coor_set.append(new_coor)\n new_bs.append(b)\n self.ligand._active[self.ligand._selection] = False\n selection = self.ligand._selection[self._cluster]\n self.ligand._active[selection] = True\n for atom in self._cluster:\n atom_sel = self.ligand._selection[self.ligand.connectivity[atom]]\n self.ligand._active[atom_sel] = True\n self.conformer = self.ligand\n self._coor_set = new_coor_set\n self._bs = new_bs\n if len(self._coor_set) < 1:\n logger.warning(\n f\"{self.ligand.resn[0]}: \"\n f\"Local search {self._cluster_index}: {len(self._coor_set)} conformers\"\n )\n return\n\n # QP score conformer occupancy\n logger.debug(\"Converting densities.\")\n self._convert()\n 
self._solve_qp()\n logger.debug(\"Updating conformers\")\n self._update_conformers()\n if self.options.write_intermediate_conformers:\n self._write_intermediate_conformers(prefix=\"localsearch_ligand_qp\")\n if len(self._coor_set) < 1:\n logger.warning(\n f\"{self.ligand.resn[0]}: \"\n f\"Local search QP {self._cluster_index}: {len(self._coor_set)} conformers\"\n )\n return\n\n # MIQP score conformer occupancy\n self._convert()\n self._solve_miqp(\n threshold=self.options.threshold, cardinality=self.options.cardinality\n )\n self._update_conformers()\n if self.options.write_intermediate_conformers:\n self._write_intermediate_conformers(prefix=\"localsearch_ligand_miqp\")", "def run(self, infected_graph):\n pos = nx.spring_layout(infected_graph)\n points = np.zeros((len(pos), 2))\n i = 0\n for p in pos:\n points[i] = pos[p]\n i += 1\n \n hull = ConvexHull(points)\n nodes = list(pos)\n return [nodes[p] for p in hull.vertices]", "def cluster_state(self):\n for ip in set([status.ip for status in self.cluster_status]):\n yield self.node_state(ip)", "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def main_rep_idxs(self):\n\n if '{}/{}'.format(SETTINGS, MAIN_REP_IDXS) in self.h5:\n return self.h5['{}/{}'.format(SETTINGS, MAIN_REP_IDXS)][:]\n else:\n return None", "def _get_indices(self, indices: VecEnvIndices) -> Iterable[int]:\n if indices is None:\n indices = range(self.num_envs)\n elif isinstance(indices, int):\n indices = [indices]\n return indices", "def ghosts(self):\n return self.mesh.discretization.ghosts", "def globalNeighbors (listAtom, count):\n\n for atom in listAtom:\n nbNeighbor = numberNeigthbor(atom[\"neighbors\"])\n for neighbor in atom[\"neighbors\"]:\n if not nbNeighbor in count.keys():\n count[nbNeighbor] = structure.countElements()\n\n if neighbor[\"element\"] in count[nbNeighbor].keys():\n count[nbNeighbor][neighbor[\"element\"]] = count[nbNeighbor][neighbor[\"element\"]] + 1\n\n else:\n count[nbNeighbor][\"others\"] = count[nbNeighbor][\"others\"] + 1", "def learn_local_servers(self):\n assert len(self.mylinks) > 0\n assert len(self.switches) > 0\n assert self.graph != None\n\n localservers = []\n for srv in self.servers:\n neighbor_sw = self.graph.neighbors(srv)\n if len(neighbor_sw) != 1:\n raise NotImplementedError(\"Single server links only\")\n else:\n neighbor_sw = neighbor_sw[0]\n if (neighbor_sw in self.switches):\n localservers.append(srv)\n\n # remove duplicates\n self.localservers = list(set(localservers))", "def __build_state_index(self):\n\n # the index for the system state\n # [rho_i, q_in, q_out, r_i, f_i]\n x_index = {}\n\n # add the density index\n x_index['density'] = OrderedDict()\n for i in range(0, self.num_cells):\n x_index['density'][i] = i\n dim_state = self.num_cells\n\n # add the upstream boundary flow\n x_index['qin'] = dim_state\n x_index['qout'] = dim_state + 1\n dim_state += 2\n\n # add on ramp variables\n # x_index['onramp'] = OrderedDict()\n # if self.cell_onramp is not None:\n # # if onramp exist in the network, otherwise skip\n # for cell_id in self.cell_onramp:\n # # add the absolute index into the state index dictionary\n # # r_i index = self.x_index{'onramp'][cell_i]\n # x_index['onramp'][cell_id] = dim_state\n # dim_state += 1\n #\n #\n # # add off ramp state variables\n # x_index['offramp'] = OrderedDict()\n # if self.cell_offramp is not None:\n # for cell_id in self.cell_offramp:\n # # add the absolute index\n # x_index['offramp'][cell_id] = dim_state\n # dim_state += 1\n\n return 
x_index, dim_state", "def getLandmarkindices(self):\n return self.subsetnodes_indices", "def target_nodes_indexes(self) -> _TargetNodes:\n return self.__target_nodes_indexes", "def _MocaCtlGetNodeIDs(self):\n mc = subprocess.Popen([MOCACTL, 'showtbl', '--nodestats'],\n stdout=subprocess.PIPE)\n out, _ = mc.communicate(None)\n nodes = set()\n for line in out.splitlines():\n node = NODE_RE.search(line)\n if node is not None:\n nodes.add(int(node.group(1)))\n node_list = list(nodes)\n length = len(node_list)\n if int(self.AssociatedDeviceCount) != length:\n type(self).AssociatedDeviceCount.Set(self, length)\n return node_list", "def all_hosts(self):\n ...", "def get_param_indexes(self):\n self.debug.start_function('get_param_indexes')\n\n for i, key in enumerate(self.mcmc_version.param_keys):\n self.param_idxs[key] = i\n for i, key in enumerate(self.mcmc_version.interp_keys):\n self.interp_idxs[key] = i\n\n self.debug.end_function()", "def _get_cluster_list(self):\n return self.__cluster_list", "def all_node_ids(self):\n return [i for i in range(0, self.n_inputs + self.n_hidden + self.n_outputs)]", "def init_processes(rank, run_id, hosts, backend='gloo'):\n hosts = hosts.split(',')\n os.environ['MASTER_ADDR'] = hosts[0] # first worker is the master worker\n os.environ['MASTER_PORT'] = '29500'\n world_size = len(hosts)\n os.environ['WORLD_SIZE'] = str(world_size)\n os.environ['RANK'] = str(rank)\n dist.init_process_group(backend, rank=rank, world_size=world_size)\n run(rank, world_size, run_id)", "def master():\n global fileName, hyp, generation, rep_type, gen\n\n start_t = datetime.datetime.now()\n print('started at ', start_t)\n\n for iter_i in range(iter_num):\n print('iter ', iter_i)\n make_new_file(iter_i)\n\n data = DataGatherer(fileName, hyp)\n neat = Neat(hyp, rep_type)\n\n for gen in range(generation):\n pop = neat.ask() # Get newly evolved individuals from NEAT\n reward = batchMpiEval(pop, gen=neat.gen, sp_count=len(neat.species)) # Send pop to be evaluated by workers\n neat.tell(reward) # Send fitness to NEAT\n neat.probMoo() # Rank population according to objectivess\n neat.speciate() # Divide population into species\n\n data = gatherData(data,neat,gen,iter_i,hyp,savePop=True)\n print(gen, '\\t - \\t', data.display())\n print('\\t', len(neat.species))\n\n neat.gen += 1\n\n # Clean up and data gathering at run end\n data = gatherData(data,neat,gen,iter_i,hyp,savePop=False)\n data.save()\n data.savePop(neat.pop,fileName) # Save population as 2D numpy arrays\n\n print('finish at ', datetime.datetime.now())\n print('total time ', datetime.datetime.now()-start_t)\n\n stopAllWorkers()", "def get_indices(self):\r\n return self._indices", "def init_distributed(args: dict):\n\n if is_distributed(args):\n dist.init_process_group(backend=\"nccl\")\n torch.cuda.set_device(args.local_rank)", "def _get_localLocator(self):\n return self.__localLocator", "def get_clusters(cluster_path): #{{{\n print 'loading cluster info'\n indicesToParticle = pickle.load(open(cluster_path+\"/verticesToParticle.p\",\"rb\"))\n indicesOnCluster = pickle.load(open(cluster_path+\"/verticesOnCell.p\",\"rb\"))\n maxIndices = pickle.load(open(cluster_path+\"/maxVertices.p\",\"rb\"))\n print 'done'\n\n return indicesToParticle, indicesOnCluster, maxIndices #}}}", "def _reinit_indexes(self):\n print('Reinitializing indexes...')\n for identity in self.groundtruth_metadata.keys():\n self.groundtruth_metadata[identity]['index'] = 0\n print('Indexes reinitialized!')" ]
[ "0.6674267", "0.607246", "0.59368324", "0.58657676", "0.5651199", "0.56479245", "0.56453234", "0.5612734", "0.5533967", "0.5532873", "0.5474459", "0.5465615", "0.5442727", "0.5435518", "0.54162186", "0.5397795", "0.535609", "0.5344314", "0.534363", "0.5338216", "0.5314248", "0.52974224", "0.52903426", "0.52683043", "0.5259715", "0.5232956", "0.52313954", "0.5181487", "0.51801115", "0.51647544", "0.5163645", "0.51168114", "0.5109594", "0.51051354", "0.51051354", "0.51051354", "0.5091828", "0.5069085", "0.5065785", "0.5059294", "0.50592524", "0.5051468", "0.50433433", "0.5033252", "0.502981", "0.50105524", "0.50082177", "0.50051004", "0.50015104", "0.49873784", "0.49863306", "0.49699476", "0.49683842", "0.49669537", "0.49516696", "0.4947192", "0.4941338", "0.49368605", "0.49363443", "0.49351165", "0.49349007", "0.49347967", "0.4929381", "0.4928368", "0.49009204", "0.48953834", "0.48906335", "0.48879203", "0.48855883", "0.4869368", "0.48636985", "0.4854878", "0.48543105", "0.48526996", "0.48491344", "0.48489565", "0.48467875", "0.48445782", "0.48440439", "0.48368284", "0.48328134", "0.48226535", "0.48167726", "0.481165", "0.48099408", "0.48073557", "0.48057312", "0.4803654", "0.48023376", "0.47985154", "0.47936815", "0.47932634", "0.4785926", "0.47853205", "0.4774987", "0.47726297", "0.477176", "0.47596827", "0.4759215", "0.47572854" ]
0.70098007
0
This function does the same thing as gather_global_indices but may also work when topo is None. The function is useful if you need to collect global indices on a topo defined only on a subset of comm, since for the procs not in this subset, topo will be equal to None. In such a case, comm and dom are required. This may happen when you want to build a bridge between two topologies that do not handle the same number of processes but have an overlap between the two groups of processes of the topologies. In that case, a call to gather_global_indices(topo, comm, dom) will work on all processes belonging to comm, whether topo is None or not. The values corresponding to ranks not in topo will be empty slices.
def gather_global_indices_overlap(topo=None, comm=None, dom=None, toslice=True, root=None):
    if topo is None:
        assert comm is not None and dom is not None
        size = comm.Get_size()
        rank = comm.Get_rank()
        dimension = dom.dimension
        iglob = npw.int_zeros((dimension * 2, size))
        iglob_res = npw.int_zeros((dimension * 2, size))
        # Ranks that do not own the topology keep start == 0 and end == -1,
        # so the slices rebuilt by utils.arrayToDict are empty.
        iglob[1::2, rank] = -1
        if root is None:
            comm.Allgather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT])
        else:
            comm.Gather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT], root=root)
        if toslice:
            return utils.arrayToDict(iglob_res)
        else:
            return iglob_res
    else:
        return TopoTools.gather_global_indices(topo, toslice, root, comm)
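A minimal usage sketch (an editor's illustration, not part of the dataset row): it shows the call pattern described in the query, where only a subset of the ranks of comm owns a topology and the remaining ranks pass topo=None. The names build_topology and domain are hypothetical stand-ins for whatever builds the Cartesian topology and the domain object in the real code; gather_global_indices_overlap and its helpers are assumed to be imported from the same module as the function above.

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Only the first half of the ranks builds a topology (hypothetical helper);
# the remaining ranks keep topo = None.
in_subset = rank < comm.Get_size() // 2
topo = build_topology(domain, comm) if in_subset else None

# Every rank of comm takes part in the collective: ranks without a topology
# contribute empty slices, the others contribute their local mesh extents.
slices = gather_global_indices_overlap(topo=topo, comm=comm, dom=domain)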
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gather_global_indices(topo, toslice=True, root=None, comm=None):\n if comm is None:\n comm = topo.parent()\n size = comm.size\n start = topo.mesh.start()\n end = topo.mesh.stop() - 1\n # communicator that owns the topology\n rank = comm.Get_rank()\n dimension = topo.domain.dimension\n iglob = npw.int_zeros((dimension * 2, size))\n iglob_res = npw.int_zeros((dimension * 2, size))\n iglob[0::2, rank] = start\n iglob[1::2, rank] = end\n # iglob is saved as a numpy array and then transform into\n # a dict of slices since mpi send operations are much\n # more efficient with numpy arrays.\n if root is None:\n comm.Allgather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT])\n else:\n comm.Gather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT],\n root=root)\n\n if toslice:\n return utils.arrayToDict(iglob_res)\n else:\n return iglob_res", "def mpi_index_maps(loc_idx, shape, topology, coords, comm):\n\n nprocs = comm.Get_size()\n\n # Gather data structures from all ranks in order to produce the\n # relevant mappings.\n dat_len = np.zeros(topology, dtype=tuple)\n for j in range(nprocs):\n dat_len[coords[j]] = comm.bcast(shape, root=j)\n if any(k == 0 for k in dat_len[coords[j]]):\n dat_len[coords[j]] = as_tuple([0]*len(dat_len[coords[j]]))\n\n # If necessary, add the time index to the `topology` as this will\n # be required to correctly construct various maps.\n if len(np.amax(dat_len)) > len(topology):\n topology = as_list(topology)\n coords = [as_list(l) for l in coords]\n for _ in range(len(np.amax(dat_len)) - len(topology)):\n topology.insert(0, 1)\n for e in coords:\n e.insert(0, 0)\n topology = as_tuple(topology)\n coords = as_tuple([as_tuple(i) for i in coords])\n dat_len = dat_len.reshape(topology)\n dat_len_cum = distributed_data_size(dat_len, coords, topology)\n\n # This 'transform' will be required to produce the required maps\n transform = []\n for i in as_tuple(loc_idx):\n if isinstance(i, slice):\n if i.step is not None:\n transform.append(slice(None, None, np.sign(i.step)))\n else:\n transform.append(slice(None, None, None))\n else:\n transform.append(slice(0, 1, None))\n transform = as_tuple(transform)\n\n global_size = dat_len_cum[coords[-1]]\n\n indices = np.zeros(global_size, dtype=tuple)\n global_si = np.zeros(global_size, dtype=tuple)\n it = np.nditer(indices, flags=['refs_ok', 'multi_index'])\n while not it.finished:\n index = it.multi_index\n indices[index] = index\n it.iternext()\n global_si[:] = indices[transform]\n\n # Create the 'rank' slices\n rank_slice = []\n for j in coords:\n this_rank = []\n for k in dat_len[j]:\n this_rank.append(slice(0, k, 1))\n rank_slice.append(this_rank)\n # Normalize the slices:\n n_rank_slice = []\n for i in range(len(rank_slice)):\n my_coords = coords[i]\n if any([j.stop == j.start for j in rank_slice[i]]):\n n_rank_slice.append(as_tuple([None]*len(rank_slice[i])))\n continue\n if i == 0:\n n_rank_slice.append(as_tuple(rank_slice[i]))\n continue\n left_neighbours = []\n for j in range(len(my_coords)):\n left_coord = list(my_coords)\n left_coord[j] -= 1\n left_neighbours.append(as_tuple(left_coord))\n left_neighbours = as_tuple(left_neighbours)\n n_slice = []\n for j in range(len(my_coords)):\n if left_neighbours[j][j] < 0:\n start = 0\n stop = dat_len_cum[my_coords][j]\n else:\n start = dat_len_cum[left_neighbours[j]][j]\n stop = dat_len_cum[my_coords][j]\n n_slice.append(slice(start, stop, 1))\n n_rank_slice.append(as_tuple(n_slice))\n n_rank_slice = as_tuple(n_rank_slice)\n\n # Now fill each elements owner:\n owners = 
np.zeros(global_size, dtype=np.int32)\n send = np.zeros(global_size, dtype=np.int32)\n for i in range(len(n_rank_slice)):\n if any([j is None for j in n_rank_slice[i]]):\n continue\n else:\n owners[n_rank_slice[i]] = i\n send[:] = owners[transform]\n\n # Construct local_si\n local_si = np.zeros(global_size, dtype=tuple)\n it = np.nditer(local_si, flags=['refs_ok', 'multi_index'])\n while not it.finished:\n index = it.multi_index\n owner = owners[index]\n my_slice = n_rank_slice[owner]\n rnorm_index = []\n for j, k in zip(my_slice, index):\n rnorm_index.append(k-j.start)\n local_si[index] = as_tuple(rnorm_index)\n it.iternext()\n return owners, send, global_si, local_si", "def gather_dof_coordinates(V: FunctionSpace, dofs: np.ndarray):\n x = V.tabulate_dof_coordinates()\n local_dofs = dofs[dofs < V.dofmap.index_map.size_local * V.dofmap.index_map_bs]\n coords = x[local_dofs]\n num_nodes = len(coords)\n glob_num_nodes = MPI.COMM_WORLD.allreduce(num_nodes, op=MPI.SUM)\n recvbuf = None\n if MPI.COMM_WORLD.rank == 0:\n recvbuf = np.zeros(3 * glob_num_nodes, dtype=np.float64)\n sendbuf = coords.reshape(-1)\n sendcounts = np.array(MPI.COMM_WORLD.gather(len(sendbuf), 0))\n MPI.COMM_WORLD.Gatherv(sendbuf, (recvbuf, sendcounts), root=0)\n glob_coords = MPI.COMM_WORLD.bcast(recvbuf, root=0).reshape((-1, 3))\n return glob_coords", "def get_local_indices(self, part, ctx):\n return self.map_to_global(\n F.arange(0, self.local_size(part), ctx=ctx), part\n )", "def getGlobalIndices( self, indices: list):\n result = indices.copy()\n for i,toAdd in enumerate(self._layout.starts):\n result[self._layout.dims_order[i]]=indices[i]+toAdd\n return result", "def get_prescribed_indexes(self):\n global_prescribed = []\n for node in self.preprocessor.nodes.values():\n if node.there_are_prescribed_dofs:\n starting_position = node.global_index * DOF_PER_NODE_STRUCTURAL\n dofs = np.array(node.get_prescribed_dofs_bc_indexes()) + starting_position\n global_prescribed.extend(dofs)\n return global_prescribed", "def __global_index( self , active_index = None , global_index = None , ijk = None):\n\n set_count = 0\n if not active_index is None:\n set_count += 1\n\n if not global_index is None:\n set_count += 1\n\n if ijk:\n set_count += 1\n \n if not set_count == 1:\n raise ValueError(\"Exactly one of the kewyord arguments active_index, global_index or ijk must be set\")\n \n if not active_index is None:\n global_index = self._get_global_index1A( active_index )\n elif ijk:\n nx = self.getNX()\n ny = self.getNY()\n nz = self.getNZ()\n \n i,j,k = ijk\n\n if not 0 <= i < nx:\n raise IndexError(\"Invalid value i:%d Range: [%d,%d)\" % (i , 0 , nx)) \n\n if not 0 <= j < ny:\n raise IndexError(\"Invalid value j:%d Range: [%d,%d)\" % (j , 0 , ny)) \n \n if not 0 <= k < nz:\n raise IndexError(\"Invalid value k:%d Range: [%d,%d)\" % (k , 0 , nz)) \n\n global_index = self._get_global_index3( i,j,k)\n else:\n if not 0 <= global_index < self.getGlobalSize():\n raise IndexError(\"Invalid value global_index:%d Range: [%d,%d)\" % (global_index , 0 , self.getGlobalSize())) \n return global_index", "def serendipity_indices(\n total: int, linear: int, dim: int, done: typing.Optional[typing.List[int]] = None\n) -> typing.List[typing.List[int]]:\n if done is None:\n done = []\n if len(done) == dim:\n if done.count(1) >= linear:\n return [done]\n return []\n if len(done) == dim - 1:\n return serendipity_indices(total, linear, dim, done=done + [total - sum(done)])\n out = []\n for i in range(total - sum(done) + 1):\n out += 
serendipity_indices(total, linear, dim, done + [i])\n return out", "def get_sgrna_global_indices(sgrna_df, seq_start, seq_end, strand, sg_positions=None):\n indexed_sgrna_df = sgrna_df.copy()\n indexed_sgrna_df['sgrna_global_start'] = calculate_global_position(strand, seq_start, seq_end,\n indexed_sgrna_df['sgrna_relative_start'])\n if sg_positions is not None:\n for pos in sg_positions:\n indexed_sgrna_df['sgrna_global_' + str(pos)] = traverse_global_position(strand,\n indexed_sgrna_df['sgrna_global_start'],\n pos-1)\n indexed_sgrna_df = indexed_sgrna_df.drop('sgrna_relative_start', axis=1)\n return indexed_sgrna_df", "def dist_init(\n local_rank: int,\n num_procs: int,\n *func_args: list[Any],\n **func_kwargs: dict[str, Any],\n ) -> None:\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = '29503'\n os.environ['LOCAL_RANK'] = str(local_rank)\n # NOTE: unit tests don't support multi-node so\n # local_rank == global rank\n os.environ['RANK'] = str(local_rank)\n os.environ['WORLD_SIZE'] = str(num_procs)\n\n dist.init_process_group('gloo')\n\n run_func(*func_args, **func_kwargs)\n\n # Keep faster ranks from exiting and breaking process group\n dist.barrier()", "def index(self):\n # Check is multiple orders were given\n try:\n orders = list(iter(self.orders))\n except TypeError:\n orders = [self.orders]\n sites = self._epistasismap.sites\n x = [i for i in range(1, len(sites)) if len(sites[i]) in orders]\n # Add the zeroth element if included\n if 0 in orders:\n x = [0] + x\n return np.array(x)", "def global_index(self):\n raise NotImplementedError", "def cal_globalIndexH(self):\n h_local = self.cal_localIndexH()\n h_global = np.sum(h_local)\n\n return h_global", "def toglobal(self, attr=()):\n if self.comm is None:\n raise RuntimeError('This array is not a local image.')\n counts = []\n offsets = [0]\n for rank in range(self.comm.Get_size()):\n s = split_work(self.shape_global[0], rank=rank, comm=self.comm)\n n = (s.stop - s.start) * np.product(self.shape_global[1:])\n counts.append(n)\n offsets.append(offsets[-1] + n)\n offsets.pop()\n s = split_work(self.shape_global[0], comm=self.comm)\n n = s.stop - s.start\n output = self.empty(self.shape_global, dtype=self.dtype,\n comm=MPI.COMM_SELF)\n output.__array_finalize__(self)\n t = MPI.BYTE.Create_contiguous(self.dtype.itemsize)\n t.Commit()\n self.comm.Allgatherv([self[0:n], t], [output.view(np.byte), (counts, offsets), t])\n\n for a in attr:\n i = getattr(self, a, None)\n if i is None:\n continue\n o = np.empty(self.shape_global, dtype=i.dtype)\n t = MPI.BYTE.Create_contiguous(i.dtype.itemsize)\n t.Commit()\n self.comm.Allgatherv([i[0:n], t], [o, (counts, offsets), t])\n setattr(output, a, o)\n \n output.comm = MPI.COMM_SELF\n\n return output", "def init_retrieval(self, distributed_port):\n\n logger.info(\"initializing retrieval\")\n\n # initializing a separate process group for retrievel as the default\n # nccl backend doesn't support gather/scatter operations while gloo\n # is too slow to replace nccl for the core gpu communication\n if dist.is_initialized():\n logger.info(\"dist initialized\")\n # needs to be set manually\n os.environ[\"GLOO_SOCKET_IFNAME\"] = self._infer_socket_ifname()\n # avoid clash with the NCCL port\n os.environ[\"MASTER_PORT\"] = str(distributed_port + 1)\n self.process_group = dist.new_group(ranks=None, backend=\"gloo\")\n\n # initialize retriever only on the main worker\n if not dist.is_initialized() or self._is_main():\n logger.info(\"dist not initialized / main\")\n 
self.retriever.init_index()\n\n # all processes wait untill the retriever is initialized by the main process\n if dist.is_initialized():\n torch.distributed.barrier(group=self.process_group)", "def get_unprescribed_indexes(self):\n total_dof = DOF_PER_NODE_STRUCTURAL * len(self.preprocessor.nodes)\n all_indexes = np.arange(total_dof)\n return np.delete(all_indexes, self.prescribed_indexes)", "def gpus_for_process(process_idx: int, num_gpus_per_process: int, gpu_mask: Optional[List[int]] = None) -> List[int]:\n\n available_gpus = get_available_gpus()\n if gpu_mask is not None:\n assert len(available_gpus) >= len(\n gpu_mask\n ), f\"Number of available GPUs ({len(available_gpus)}) is less than number of GPUs in mask ({len(gpu_mask)})\"\n available_gpus = [available_gpus[g] for g in gpu_mask]\n num_gpus = len(available_gpus)\n\n gpus_to_use = []\n if num_gpus == 0:\n return gpus_to_use\n\n first_gpu_idx = process_idx * num_gpus_per_process\n for i in range(num_gpus_per_process):\n index_mod_num_gpus = (first_gpu_idx + i) % num_gpus\n gpus_to_use.append(index_mod_num_gpus)\n\n log.debug(\n f\"Using GPUs {gpus_to_use} for process {process_idx} (actually maps to GPUs {[available_gpus[g] for g in gpus_to_use]})\"\n )\n return gpus_to_use", "def get_global_rank(backend) -> int:\n if backend != 'mpi':\n return int(os.environ.get('RANK', 0))\n else:\n return int(os.environ.get('OMPI_COMM_WORLD_RANK', 0))", "def _get_local_rank_size(comm):\n this_node = platform.node()\n ranks_nodes = comm.allgather((comm.Get_rank(), this_node))\n node2rankssofar = collections.defaultdict(int)\n local_rank = None\n for (rank, node) in ranks_nodes:\n if rank == comm.Get_rank():\n local_rank = node2rankssofar[node]\n node2rankssofar[node] += 1\n assert local_rank is not None\n return local_rank, node2rankssofar[this_node]", "def testUnknownIndices(self):\n params = constant_op.constant(((0, 1, 2),))\n indices = array_ops.placeholder(dtypes.int32)\n gather_nd_t = array_ops.gather_nd(params, indices, batch_dims=1)\n shape = gather_nd_t.get_shape()\n self.assertIsNone(shape.ndims)\n self.assertIsNone(tensor_shape.dimension_value(shape[0]))", "def get_local_params(self, par_global):\n return [\n par_global[a] if a is not None else b\n for a, b in zip(self._p_global_indices, self.p_local)\n ]", "def get_sobol_indices(self, order):\n self._set_statistics()\n return self.statistics_object.get_sobol(order)", "def indices(dimensions, dtype=int, sparse=False):\n\n if not isinstance(dimensions, (tuple, list)):\n pass\n elif len(dimensions) > 2 or len(dimensions) == 0:\n pass\n elif dtype != int:\n pass\n elif sparse:\n pass\n else:\n return dpnp_indices(dimensions)\n\n return call_origin(numpy.indices, dimensions, dtype, sparse)", "def init_distributed(args: dict):\n\n if is_distributed(args):\n dist.init_process_group(backend=\"nccl\")\n torch.cuda.set_device(args.local_rank)", "def _get_indices(self, indices: VecEnvIndices) -> Iterable[int]:\n if indices is None:\n indices = range(self.num_envs)\n elif isinstance(indices, int):\n indices = [indices]\n return indices", "def FindDistributedPoints(self, p_int, , vtkIdList, p_int_1):\n ...", "def getGlobalIdxVals( self, i : int ):\n return range(self._layout.starts[i],self._layout.ends[i])", "def GetNodeCommonality(self):\n\n self.__do_essential_memebers_exist__()\n\n elements = self.elements.ravel()\n idx_sort = np.argsort(elements)\n sorted_elements = elements[idx_sort]\n vals, idx_start = np.unique(sorted_elements, return_index=True)\n\n # Sets of indices\n flat_pos = 
np.split(idx_sort, idx_start[1:])\n els = np.split(idx_sort // int(self.elements.shape[1]), idx_start[1:])\n pos = np.split(idx_sort % int(self.elements.shape[1]), idx_start[1:])\n\n # In case one wants to return only the duplicates i.e. filter keeping only items occurring more than once\n # vals, idx_start, count = np.unique(sorted_elements, return_counts=True, return_index=True)\n # vals = vals[count > 1]\n # res = filter(lambda x: x.size > 1, res)\n\n return els, pos, flat_pos", "def get_unused_indices(program):\n used = get_used_indices(program)\n all_indices = set(range(len(program.var_types) - 1))\n return all_indices - used", "def compute_variable_indexes(path, overwrite=True, multiproc=False):\n if multiproc is True:\n tf.keras.backend.clear_session()\n set_cpu_option()\n\n gin_bindings = [\n \"evaluation.evaluation_fn = @variables_idx\",\n \"variables_idx.num_train = 10000\", \"evaluation.random_seed = 2051556033\",\n \"dataset.name='auto'\", \"evaluation.name = 'variables index'\"\n ]\n path = pathlib.Path(path)\n result_path = path.parent.parent / \"metrics\" / \"variance\" / \"filtered_variables\"\n logger.info(\"Computing variable indexes of {}\".format(path.parent.parent))\n gin_evaluation(path, result_path, overwrite, gin_bindings)", "def _get_sub_pgs(self, tensor_parallel_size: int):\n tp_ranks: List[List[int]] = []\n fsdp_ranks: List[List[int]] = []\n for rank in range(self.world_size):\n tp_idx = rank // tensor_parallel_size\n if len(tp_ranks) <= tp_idx:\n tp_ranks.append([])\n tp_ranks[tp_idx].append(rank)\n fsdp_idx = rank % tensor_parallel_size\n if len(fsdp_ranks) <= fsdp_idx:\n fsdp_ranks.append([])\n fsdp_ranks[fsdp_idx].append(rank)\n tp_pgs = [dist.new_group(ranks) for ranks in tp_ranks]\n fsdp_pgs = [dist.new_group(ranks) for ranks in fsdp_ranks]\n tp_pg = tp_pgs[self.rank // tensor_parallel_size]\n fsdp_pg = fsdp_pgs[self.rank % tensor_parallel_size]\n return tp_pg, fsdp_pg", "def _getNonPrototypeIndices(self, clusters: ndarray) -> ndarray:\n return np.delete(np.arange(self.dataSize), clusters.flatten())", "def _getitem2d(self, index):\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iz = index[1]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[2]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n nz = hivects[1,:].max() - self.nghosts\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[1] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = 
min(ixstop, lovects[0,i] + fields[i].shape[0])\n iz1 = max(izstart, lovects[1,i])\n iz2 = min(izstop, lovects[1,i] + fields[i].shape[1])\n\n if ix1 < ix2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iz1 - lovects[1,i], iz2 - lovects[1,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iz, slice):\n sss[1] = 0\n\n return resultglobal[tuple(sss)]", "def ordered_indices(self):\r\n '''we need random order'''\r\n if self.shuffle:\r\n indices = np.random.permutation(len(self))\r\n else:\r\n indices = np.arange(len(self))\r\n '''\r\n if self.tgt_sizes is not None:\r\n indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')]\r\n return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]\r\n '''\r\n return indices", "def _staticneighs_get_corestored_by_inds_notslice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = [self.idxs[i] for i in inds]\n idxs = np.array(idxs) if type(self.idxs) == np.ndarray else idxs\n if self.sp_relative_pos is not None:\n sp_relative_pos = [self.sp_relative_pos[i] for i in inds]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def get_grid(grid_params, dims):\n orders = np.array(object=grid_params[\"orders\"][:dims], dtype=int)\n grid_min = np.array(object=grid_params[\"lower bounds\"][:dims], dtype=float)\n grid_max = np.array(object=grid_params[\"upper bounds\"][:dims], dtype=float)\n\n # calculate number of grid points and generate index\n n_points = orders.prod()\n index = np.array(object=range(n_points))\n\n # generate grid\n grid = {}\n for idx in range(dims):\n grid_values_tmp = np.linspace(grid_min[idx], grid_max[idx], orders[idx],)\n grid[idx] = grid_values_tmp\n\n return grid, index", "def get_grid_index(init_grid_size, map_size, device):\n H_init, W_init = init_grid_size\n H, W = map_size\n idx = torch.arange(H * W, device=device).reshape(1, 1, H, W)\n idx = F.interpolate(idx.float(), [H_init, W_init], mode='nearest').long()\n return idx.flatten()", "def map_to_global(self, idxs, part_id):\n return F.zerocopy_from_dgl_ndarray(\n _CAPI_DGLNDArrayPartitionMapToGlobal(\n self._partition, F.zerocopy_to_dgl_ndarray(idxs), part_id\n )\n )", "def main_rep_idxs(self):\n\n if '{}/{}'.format(SETTINGS, MAIN_REP_IDXS) in self.h5:\n return self.h5['{}/{}'.format(SETTINGS, MAIN_REP_IDXS)][:]\n else:\n return None", "def get_global_loads_for_static_analysis(self):\n try:\n\n cols = 1\n total_dof = DOF_PER_NODE_STRUCTURAL * len(self.preprocessor.nodes)\n\n _frequencies = np.array([0.], dtype=float)\n loads = np.zeros((total_dof, cols), dtype=complex)\n \n # elementary loads - element integration\n for element in self.preprocessor.structural_elements.values():\n position = element.global_dof\n # self-weight loads\n if self.preprocessor.project.weight_load:\n loads[position] += element.get_self_weighted_load(self.preprocessor.gravity_vector)\n # stress stiffening loads\n if self.preprocessor.project.internal_pressure_load:\n loads[position] += element.force_vector_stress_stiffening()\n # distributed 
loads\n if self.preprocessor.project.element_distributed_load:\n loads[position] += element.get_distributed_load()\n \n if self.preprocessor.project.external_nodal_loads:\n # nodal loads\n for node in self.preprocessor.nodes.values():\n if node.there_are_nodal_loads:\n position = node.global_dof\n if node.loaded_table_for_nodal_loads:\n temp_loads = [_frequencies if bc is None else bc for bc in node.nodal_loads]\n else:\n temp_loads = [_frequencies if bc is None else np.ones_like(_frequencies)*bc for bc in node.nodal_loads]\n loads[position, :] += temp_loads\n\n except Exception as _error_log:\n print(str(_error_log))\n \n return loads[self.unprescribed_indexes,:]", "def dim_map(dims, coos, cyclic=False, trim=False):\n # Figure out shape of dimensions given\n if isinstance(dims, np.ndarray):\n szs = dims.shape\n ndim = dims.ndim\n else:\n szs = _find_shape_of_nested_int_array(dims)\n ndim = len(szs)\n\n # Ensure `coos` in right format for 1d (i.e. not single tuples)\n if ndim == 1:\n if isinstance(coos, np.ndarray):\n coos = coos.ravel()\n elif not isinstance(coos[0], Integral):\n coos = (c[0] for c in coos)\n\n # Map coordinates to indices\n try:\n inds = _dim_mapper_methods[(ndim, cyclic, trim)](*szs, coos)\n except KeyError:\n inds = _dim_map_nd(szs, coos, cyclic, trim)\n\n # Ravel dims\n while ndim > 1:\n dims = itertools.chain.from_iterable(dims)\n ndim -= 1\n\n return tuple(dims), tuple(inds)", "def ordering(self):\n if self.dim_ordering is None:\n return list(range(self.rank))\n\n orig = self.dim_ordering.dims_and_symbols.dims\n return [orig.index(sym) for sym in self.dim_ordering.map.dims]", "def get_neighbourhood(indices, map_shape):\n if isinstance(map_shape, int):\n nx = 1\n size = map_shape\n elif len(map_shape) == 2:\n nx = map_shape[1]\n size = map_shape[0] * map_shape[1]\n else:\n print(\"Check your `map_shape` value.\")\n return\n extended = list(indices)\n for s in extended:\n susjedi = np.unique(\n np.array([s-2*nx,\n s-nx-1, s-nx, s-nx+1,\n s-2, s-1, s, s+1, s+2,\n s+nx-1, s+nx, s+nx+1,\n s+2*nx]))\n susjedi_cor = susjedi[(susjedi >= 0) & (susjedi < size)]\n extended = extended + list(susjedi_cor)\n return np.sort(np.unique(extended))", "def init_processes(rank, run_id, hosts, backend='gloo'):\n hosts = hosts.split(',')\n os.environ['MASTER_ADDR'] = hosts[0] # first worker is the master worker\n os.environ['MASTER_PORT'] = '29500'\n world_size = len(hosts)\n os.environ['WORLD_SIZE'] = str(world_size)\n os.environ['RANK'] = str(rank)\n dist.init_process_group(backend, rank=rank, world_size=world_size)\n run(rank, world_size, run_id)", "def get_global_index( self , ijk = None , active_index = None):\n gi = self.__global_index( active_index = active_index , ijk = ijk)\n return gi", "def internal_global_clustering(self, node_list):\n clustering = self.local_clustering()\n internal_clustering = clustering[node_list].mean()\n return internal_clustering", "def get_free_indices(program, program_len):\n used = get_used_indices(program)\n total = set(range(program_len + len(program.input_types)))\n return total - used", "def _notstaticneighs_get_corestored_by_inds_slice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = self.idxs\n if self.sp_relative_pos is not None:\n sp_relative_pos = []\n for k in range(len(self.sp_relative_pos)):\n sp_relative_pos += [[self.sp_relative_pos[k][i] for i in inds]]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def global_level(\n adata: AnnData,\n use_label: str = \"louvain\",\n use_rep: str = \"X_pca\",\n 
n_dims: int = 40,\n list_clusters: list = [],\n return_graph: bool = False,\n w: float = None,\n verbose: bool = True,\n copy: bool = False,\n) -> Optional[AnnData]:\n\n assert w <= 1, \"w should be in range 0 to 1\"\n # Get global graph\n G = _read_graph(adata, \"global_graph\")\n # Convert to directed graph\n H = G.to_directed()\n\n cat_inds = adata.uns[use_label + \"_index_dict\"]\n inds_cat = {v: k for (k, v) in cat_inds.items()}\n\n # Query cluster\n if type(list_clusters[0]) == str:\n list_clusters = [cat_inds[label] for label in list_clusters]\n query_nodes = list_clusters\n\n query_nodes = ordering_nodes(query_nodes, use_label, adata)\n if verbose:\n print(\n \"Start to construct the trajectory: \"\n + \" -> \".join(np.array(query_nodes).astype(str))\n )\n\n query_dict = {}\n order_dict = {}\n\n for i in query_nodes:\n order = 0\n for j in adata.obs[adata.obs[use_label] == str(inds_cat[i])][\n \"sub_cluster_labels\"\n ].unique():\n query_dict[int(j)] = int(i)\n order_dict[int(j)] = int(order)\n\n order += 1\n dm_list = []\n sdm_list = []\n order_big_dict = {}\n edge_list = []\n\n for i, j in enumerate(query_nodes):\n order_big_dict[j] = int(i)\n if i == len(query_nodes) - 1:\n break\n for j in adata.uns[\"split_node\"][query_nodes[i]]:\n for k in adata.uns[\"split_node\"][query_nodes[i + 1]]:\n edge_list.append((int(j), int(k)))\n\n # Calculate DPT distance matrix\n dm_list.append(\n ge_distance_matrix(\n adata,\n inds_cat[query_nodes[i]],\n inds_cat[query_nodes[i + 1]],\n use_label=use_label,\n use_rep=use_rep,\n n_dims=n_dims,\n )\n )\n # Calculate Spatial distance matrix\n sdm_list.append(\n spatial_distance_matrix(\n adata,\n inds_cat[query_nodes[i]],\n inds_cat[query_nodes[i + 1]],\n use_label=use_label,\n )\n )\n\n # Get centroid dictionary\n centroid_dict = adata.uns[\"centroid_dict\"]\n centroid_dict = {int(key): centroid_dict[key] for key in centroid_dict}\n\n H_sub = H.edge_subgraph(edge_list)\n if not nx.is_connected(H_sub.to_undirected()):\n raise ValueError(\n \"The chosen clusters are not available to construct the spatial trajectory! 
Please choose other path.\"\n )\n H_sub = nx.DiGraph(H_sub)\n prepare_root = []\n for node in adata.uns[\"split_node\"][query_nodes[0]]:\n H_sub.add_edge(9999, int(node))\n prepare_root.append(centroid_dict[int(node)])\n\n prepare_root = np.array(prepare_root)\n centroide = (\n sum(prepare_root[:, 0]) / len(prepare_root[:, 0]),\n sum(prepare_root[:, 1]) / len(prepare_root[:, 1]),\n )\n\n # Get centroid dictionary\n centroid_dict = adata.uns[\"centroid_dict\"]\n centroid_dict = {int(key): centroid_dict[key] for key in centroid_dict}\n\n H_sub = H.edge_subgraph(edge_list)\n H_sub = nx.DiGraph(H_sub)\n\n prepare_root = []\n for node in adata.uns[\"split_node\"][query_nodes[0]]:\n H_sub.add_edge(9999, int(node))\n prepare_root.append(centroid_dict[int(node)])\n\n prepare_root = np.array(prepare_root)\n centroide = (\n sum(prepare_root[:, 0]) / len(prepare_root[:, 0]),\n sum(prepare_root[:, 1]) / len(prepare_root[:, 1]),\n )\n centroid_dict[9999] = np.array(centroide)\n\n labels = nx.get_edge_attributes(H_sub, \"weight\")\n\n for edge, _ in labels.items():\n\n dm = dm_list[order_big_dict[query_dict[edge[0]]]]\n sdm = sdm_list[order_big_dict[query_dict[edge[0]]]]\n\n weight = dm[order_dict[edge[0]], order_dict[edge[1]]] * w + sdm[\n order_dict[edge[0]], order_dict[edge[1]]\n ] * (1 - w)\n H_sub[edge[0]][edge[1]][\"weight\"] = weight\n # tmp = H_sub\n\n H_sub = nx.algorithms.tree.minimum_spanning_arborescence(H_sub)\n H_nodes = list(range(len(H_sub.nodes)))\n\n node_convert = {}\n for pair in zip(list(H_sub.nodes), H_nodes):\n node_convert[pair[1]] = pair[0]\n\n adata.uns[\"PTS_graph\"] = {}\n adata.uns[\"PTS_graph\"][\"graph\"] = nx.to_scipy_sparse_matrix(H_sub)\n adata.uns[\"PTS_graph\"][\"node_dict\"] = node_convert\n\n if return_graph:\n return H_sub", "def _prog_field_indices(self):\n\n if self._pfi is not None:\n return self._pfi\n\n self.arbor._grow_tree(self)\n self._pfi = np.array([node.tree_id for node in self._prog_nodes])\n return self._pfi", "def get_local_ids(self,\n np.ndarray[uint32, mode='c', ndim=1] entities not None,\n int32 dent,\n np.ndarray[uint32, mode='c', ndim=1] incident not None,\n np.ndarray[uint32, mode='c', ndim=1] offsets not None,\n int32 dim):\n cdef Indices[1] _entities, _local_ids\n cdef MeshConnectivity _incident[1]\n cdef np.ndarray[uint32, mode='c', ndim=1] out\n\n if not entities.shape[0] > 0:\n return np.empty(0, dtype=np.uint32)\n\n _entities.num = entities.shape[0]\n _entities.indices = &entities[0]\n\n _incident.num = _entities.num\n _incident.n_incident = incident.shape[0]\n _incident.indices = &incident[0]\n _incident.offsets = &offsets[0]\n\n out = np.empty(_incident.n_incident, dtype=np.uint32)\n _local_ids.num = _incident.n_incident\n _local_ids.indices = &out[0]\n mesh_get_local_ids(self.mesh, _local_ids, _entities, dent, _incident, dim)\n\n return out", "def indices(online: bool = False) -> dict:\n return _get_indices(online)", "def analyse_doms(doms, counts, log):\n dom_masks = 1<<(doms.astype(np.uint64))\n\n # initialise space for masks\n ngb_masks = np.zeros_like(dom_masks)\n\n # Fill in the masks of all the neighbouring cells\n inc = [1,0,-1] # roll increments for left, middle, right\n for i in inc:\n ri = np.roll(dom_masks, i, axis=0)\n for j in inc:\n rj = np.roll(ri, j, axis=1)\n for k in inc:\n ngb_masks |= np.roll(rj, k, axis=2)\n\n\n\n count_ds, count_alls, pcts = [], [], []\n \n for d in range(doms.max()+1):\n idx = np.flatnonzero(doms==d)\n idx_all = np.flatnonzero(ngb_masks&(1<<d))\n \n count_d = counts.ravel()[idx].sum()\n count_all 
= counts.ravel()[idx_all].sum()\n \n pct_ghosts = ((count_all - count_d)*100)//count_all\n pcts.append(pct_ghosts)\n print('Domain %2d'%d, 'has {:,} real points, {:,} total of which'.format(count_d, count_all), \n '%d%% are ghosts'%pct_ghosts, file=log)\n\n count_ds.append(count_d)\n count_alls.append(count_all)\n\n\n\n print('Total particles {:,}, total evaluated {:,} (average ghosts {:,}%)'.format(sum(count_ds), sum(count_alls), ((sum(count_alls)-sum(count_ds))*100)//sum(count_alls)), file=log)\n print('maximum {:,} on a single proc, worst ghost percentage {:,}%'.format(max(count_alls), max(pcts)), file=log)", "def get_indexes(self, variable, *args):\n\n return [get_subset_idxs(data, min, max)\n for data, (min, max) in args]", "def indices(self):\n return self._kbounded_partitions", "def _notstaticneighs_get_corestored_by_inds_notslice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = []\n for k in range(len(self.idxs)):\n idxs.append([self.idxs[k][i] for i in inds])\n idxs = np.array(idxs) if type(self.idxs) == np.ndarray else idxs\n\n if self.sp_relative_pos is not None:\n sp_relative_pos = []\n for k in range(len(self.sp_relative_pos)):\n sp_relative_pos += [[self.sp_relative_pos[k][i] for i in inds]]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def global_node_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GlobalReplicationGroupGlobalNodeGroupArgs']]]]:\n return pulumi.get(self, \"global_node_groups\")", "def pin_global_variables(device):\n\n def getter(getter, *args, **kwargs):\n var_collections = kwargs.get(\"collections\", None)\n if var_collections is None:\n var_collections = [tf.GraphKeys.GLOBAL_VARIABLES]\n if tf.GraphKeys.GLOBAL_VARIABLES in var_collections:\n with tf.device(device):\n return getter(*args, **kwargs)\n else:\n return getter(*args, **kwargs)\n\n with tf.variable_scope(\"\", custom_getter=getter) as vs:\n yield vs", "def cal_globalDissimilarity(self):\n local_diss = self.cal_localDissimilarity()\n global_diss = np.sum(local_diss)\n\n return global_diss", "def cross_global_clustering_sparse(self, node_list1, node_list2):\n # Get cross local clustering sequences\n cc = self.cross_local_clustering_sparse(node_list1, node_list2)\n return cc.mean()", "def setup(rank, world_size, master_addr='127.0.0.1', master_port=12355):\n os.environ['MASTER_ADDR'] = str(master_addr)\n os.environ['MASTER_PORT'] = str(int(master_port))\n dist.init_process_group(\"gloo\", rank=rank, world_size=world_size) # initialize the process group", "def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None):\n\n if process_mesh is not None:\n assert isinstance(\n process_mesh, ProcessMesh\n ), \"Argument process_mesh {} is not an instance of ProcessMesh\".format(\n process_mesh\n )\n else:\n process_mesh = get_current_process_mesh()\n assert (\n process_mesh is not None\n ), \"Specify the process mesh argument or use ProcessMesh context manager first.\"\n in_dims_mappings = []\n if in_shard_specs is not None:\n assert all(\n (isinstance(shard_spec, list) or shard_spec is None)\n for shard_spec in in_shard_specs\n ), \"in_shard_spec {} is not a list of list or None\".format(\n in_shard_specs\n )\n for shard_spec in in_shard_specs:\n if shard_spec is not None:\n in_dims_mappings.append(\n convert_to_dims_mapping(shard_spec, process_mesh)\n )\n else:\n in_dims_mappings.append(None)\n out_dims_mappings = []\n if out_shard_specs is not None:\n assert all(\n (isinstance(shard_spec, list) or shard_spec is None)\n for shard_spec 
in out_shard_specs\n ), \"out_shard_spec {} is not a list of list or None\".format(\n out_shard_specs\n )\n for shard_spec in out_shard_specs:\n if shard_spec is not None:\n out_dims_mappings.append(\n convert_to_dims_mapping(shard_spec, process_mesh)\n )\n else:\n out_dims_mappings.append(None)\n op = DistributedOperatorHelper(\n op, process_mesh, in_dims_mappings, out_dims_mappings\n )\n return op", "def community_layout(g, partition):\n\n pos_communities = _position_communities(g, partition, scale=3.)\n\n pos_nodes = _position_nodes(g, partition, scale=1.)\n\n # combine positions\n pos = dict()\n for node in g.nodes():\n pos[node] = pos_communities[node] + pos_nodes[node]\n\n return pos", "def init_processes(rank, size, backend='gloo'):\n os.environ['MASTER_ADDR'] = '12.12.10.13'\n os.environ['MASTER_PORT'] = '29500'\n dist.init_process_group(backend, rank=rank, world_size=size)", "def _initialize_distributed():\r\n args = get_args()\r\n\r\n device_count = torch.cuda.device_count()\r\n if torch.distributed.is_initialized():\r\n\r\n if args.rank == 0:\r\n print('torch distributed is already initialized, '\r\n 'skipping initialization ...', flush=True)\r\n args.rank = torch.distributed.get_rank()\r\n args.world_size = torch.distributed.get_world_size()\r\n\r\n else:\r\n\r\n if args.rank == 0:\r\n print('> initializing torch distributed ...', flush=True)\r\n # Manually set the device ids.\r\n if device_count > 0:\r\n device = args.rank_original % device_count\r\n if args.local_rank is not None:\r\n assert args.local_rank == device, \\\r\n 'expected local-rank to be the same as rank % device-count.'\r\n else:\r\n args.local_rank = device\r\n torch.cuda.set_device(device)\r\n\r\n torch.distributed.init_process_group(\r\n backend=args.distributed_backend,\r\n world_size=args.world_size, rank=args.rank,\r\n init_method=args.init_method)\r\n\r\n # Set the model-parallel / data-parallel communicators.\r\n if device_count > 0:\r\n mpu.initialize_model_parallel(args.model_parallel_size, args.summa_dim)", "def get_indices(data, coarse_grid_path, proj_str, use_saved_indices=False, index_path=None):\n if use_saved_indices:\n return joblib.load(index_path)\n else:\n coarse_grid = xr.open_dataset(coarse_grid_path)\n lat, lon = data['lat'].values, data['lon'].values\n coarse_lat, coarse_lon = coarse_grid['lat'].values, coarse_grid['lon'].values\n indices = find_coord_indices(coarse_lon, coarse_lat, lon.ravel(), lat.ravel(), proj_str)\n\n return indices", "def _get_global_secondary_indexes(table_name: str) -> List[str]:\n result = _describe_table(table_name=table_name)\n logger.debug(result)\n\n return [gsi['IndexName'] for gsi in result['Table'].get('GlobalSecondaryIndexes', [])]", "def init_processes(fn, local_rank, backend='nccl'):\n dist.init_process_group(backend)\n fn(dist.get_rank(), dist.get_world_size(), local_rank)", "def get_grid_locations(self, top_left, other_pos):\n cell_x = torch.floor(((other_pos[:, 0] - top_left[:, 0]) / self.neighborhood_size) *self.grid_size)\n\n # Added this part to implementation, otherwise the pooling is going to run into an indexing error\n cell_x[cell_x == self.grid_size] -= 1\n cell_y = torch.floor(((top_left[:, 1] - other_pos[:, 1]) / self.neighborhood_size) *self.grid_size)\n cell_y[cell_y == self.grid_size] -= 1\n grid_pos = cell_x + cell_y * self.grid_size\n\n return grid_pos", "def get_param_indexes(self):\n self.debug.start_function('get_param_indexes')\n\n for i, key in enumerate(self.mcmc_version.param_keys):\n self.param_idxs[key] = i\n for i, key in 
enumerate(self.mcmc_version.interp_keys):\n self.interp_idxs[key] = i\n\n self.debug.end_function()", "def _staticneighs_get_corestored_by_inds_slice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = self.idxs\n if self.sp_relative_pos is not None:\n sp_relative_pos = [self.sp_relative_pos[i] for i in inds]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def obs2grid(glon=None, glat=None, galt=None, nest='high res global',\n sites=None, debug=False):\n if isinstance(glon, type(None)):\n glon, glat, galt = get_latlonalt4res(nest=nest, centre=False,\n debug=debug)\n\n # Assume use of known CAST sites... unless others given.\n if isinstance(sites, type(None)):\n loc_dict = get_loc(rtn_dict=True)\n sites = list(loc_dict.keys())\n\n # Pull out site location indicies\n indices_list = []\n for site in sites:\n lon, lat, alt = loc_dict[site]\n vars = get_xy(lon, lat, glon, glat)\n indices_list += [vars]\n return indices_list", "def ind_nodes(self, graph=None):\n if graph is None:\n graph = self.graph\n\n dependent_nodes = set(\n node for dependents in graph.values() for node in dependents\n )\n return [node for node in graph.keys() if node not in dependent_nodes]", "def build_occupancy_map(human : Human, other_agents : np.array, cell_num : int, cell_size : float, om_channel_size : int) -> np.array:\n other_px = other_agents[:, 0] - human.px\n other_py = other_agents[:, 1] - human.py\n\n # new x-axis is in the direction of human's velocity\n human_velocity_angle = np.arctan2(human.vy, human.vx)\n other_human_orientation = np.arctan2(other_py, other_px)\n rotation = other_human_orientation - human_velocity_angle\n distance = np.linalg.norm([other_px, other_py], axis=0)\n other_px = np.cos(rotation) * distance\n other_py = np.sin(rotation) * distance\n\n # compute indices of humans in the grid\n other_x_index = np.floor(other_px / cell_size + cell_num / 2)\n other_y_index = np.floor(other_py / cell_size + cell_num / 2)\n other_x_index[other_x_index < 0] = float('-inf')\n other_x_index[other_x_index >= cell_num] = float('-inf')\n other_y_index[other_y_index < 0] = float('-inf')\n other_y_index[other_y_index >= cell_num] = float('-inf')\n grid_indices = cell_num * other_y_index + other_x_index\n occupancy_map = np.isin(range(cell_num ** 2), grid_indices)\n if om_channel_size == 1:\n return occupancy_map.astype(int)\n else:\n # calculate relative velocity for other agents\n other_human_velocity_angles = np.arctan2(other_agents[:, 3], other_agents[:, 2])\n rotation = other_human_velocity_angles - human_velocity_angle\n speed = np.linalg.norm(other_agents[:, 2:4], axis=1)\n other_vx = np.cos(rotation) * speed\n other_vy = np.sin(rotation) * speed\n dm = [list() for _ in range(cell_num ** 2 * om_channel_size)]\n for i, index in np.ndenumerate(grid_indices):\n if index in range(cell_num ** 2):\n if om_channel_size == 2:\n dm[2 * int(index)].append(other_vx[i])\n dm[2 * int(index) + 1].append(other_vy[i])\n elif om_channel_size == 3:\n dm[int(index)].append(1)\n dm[int(index) + cell_num ** 2].append(other_vx[i])\n dm[int(index) + cell_num ** 2 * 2].append(other_vy[i])\n else:\n raise NotImplementedError\n for i, cell in enumerate(dm):\n dm[i] = sum(dm[i]) / len(dm[i]) if len(dm[i]) != 0 else 0\n return dm", "def getBatch(self, n, rng, dataset):\n pmax = self._root.priority\n step = pmax / n\n indices = np.zeros(n, dtype='int32')\n for i in range(n):\n p = rng.uniform(i*step, (i+1)*step)\n node = self.find(p)\n index = self._checkTerminal(node.position, dataset)\n if 
(index >= 0):\n indices[i] = index\n else:\n return np.zeros(0)\n\n return indices", "def init_distributed_mode(params):\n params.is_slurm_job = 'SLURM_JOB_ID' in os.environ \n has_local_rank = hasattr(params, 'local_rank')\n\n # SLURM job\n if params.is_slurm_job and has_local_rank:\n\n assert params.local_rank == -1 # on the cluster, this is handled by SLURM\n\n SLURM_VARIABLES = [\n 'SLURM_JOB_ID',\n 'SLURM_JOB_NODELIST', 'SLURM_JOB_NUM_NODES', 'SLURM_NTASKS', 'SLURM_TASKS_PER_NODE',\n 'SLURM_MEM_PER_NODE', 'SLURM_MEM_PER_CPU',\n 'SLURM_NODEID', 'SLURM_PROCID', 'SLURM_LOCALID', 'SLURM_TASK_PID'\n ]\n\n PREFIX = \"%i - \" % int(os.environ['SLURM_PROCID'])\n for name in SLURM_VARIABLES:\n value = os.environ.get(name, None)\n #print(PREFIX + \"%s: %s\" % (name, str(value)))\n\n # # job ID\n # params.job_id = os.environ['SLURM_JOB_ID']\n\n # number of nodes / node ID\n params.n_nodes = int(os.environ['SLURM_JOB_NUM_NODES'])\n params.node_id = int(os.environ['SLURM_NODEID'])\n\n # local rank on the current node / global rank\n params.local_rank = int(os.environ['SLURM_LOCALID'])\n params.global_rank = int(os.environ['SLURM_PROCID'])\n\n # number of processes / GPUs per node\n params.world_size = int(os.environ['SLURM_NTASKS'])\n params.n_gpu_per_node = params.world_size // params.n_nodes\n\n # define master address and master port\n hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', os.environ['SLURM_JOB_NODELIST']])\n params.main_addr = hostnames.split()[0].decode('utf-8')\n assert 10001 <= params.main_port <= 20000 or params.world_size == 1\n #print(PREFIX + \"Master address: %s\" % params.master_addr)\n #print(PREFIX + \"Master port : %i\" % params.master_port)\n\n # set environment variables for 'env://'\n os.environ['MASTER_ADDR'] = params.main_addr\n os.environ['MASTER_PORT'] = str(params.main_port)\n os.environ['WORLD_SIZE'] = str(params.world_size)\n os.environ['RANK'] = str(params.global_rank)\n params.is_distributed = True\n\n\n # multi-GPU job (local or multi-node) - jobs started with torch.distributed.launch\n elif has_local_rank and params.local_rank != -1:\n\n assert params.main_port == -1\n\n # read environment variables\n params.global_rank = int(os.environ['RANK'])\n params.world_size = int(os.environ['WORLD_SIZE'])\n params.n_gpu_per_node = int(os.environ['NGPU'])\n\n # number of nodes / node ID\n params.n_nodes = params.world_size // params.n_gpu_per_node\n params.node_id = params.global_rank // params.n_gpu_per_node\n params.is_distributed = True\n\n else:\n n_gpu = torch.cuda.device_count()\n params.n_nodes = 1\n params.node_id = 0\n params.local_rank = 0\n params.global_rank = 0\n params.world_size = n_gpu\n params.n_gpu_per_node = n_gpu\n params.is_distributed = False\n\n # define whether this is the master process / if we are in distributed mode\n params.is_main = params.node_id == 0 and params.local_rank == 0\n params.multi_node = params.n_nodes > 1\n params.multi_gpu = params.world_size > 1\n\n # summary\n PREFIX = \"%i - \" % params.global_rank\n\n # set GPU device\n if params.is_distributed:\n torch.cuda.set_device(params.local_rank)\n device = torch.device(\"cuda\", params.local_rank)\n else:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n params.device = device\n\n # summary\n PREFIX = \"%i - \" % params.global_rank\n print(PREFIX + \"Number of nodes: %i\" % params.n_nodes)\n print(PREFIX + \"Node ID : %i\" % params.node_id)\n print(PREFIX + \"Local rank : %i\" % params.local_rank)\n print(PREFIX + \"Global rank : 
%i\" % params.global_rank)\n print(PREFIX + \"World size : %i\" % params.world_size)\n print(PREFIX + \"GPUs per node : %i\" % params.n_gpu_per_node)\n print(PREFIX + \"Multi-node : %s\" % str(params.multi_node))\n print(PREFIX + \"Multi-GPU : %s\" % str(params.multi_gpu))\n print(PREFIX + \"Hostname : %s\" % socket.gethostname())\n\n # initialize multi-GPU\n if params.is_distributed:\n\n # http://pytorch.apachecn.org/en/0.3.0/distributed.html#environment-variable-initialization\n # 'env://' will read these environment variables:\n # MASTER_PORT - required; has to be a free port on machine with rank 0\n # MASTER_ADDR - required (except for rank 0); address of rank 0 node\n # WORLD_SIZE - required; can be set either here, or in a call to init function\n # RANK - required; can be set either here, or in a call to init function\n\n #print(\"Initializing PyTorch distributed ...\")\n torch.distributed.init_process_group(\n init_method='env://',\n backend='nccl',\n )", "def _get_host_placement_all(self, context):\n raise NotImplementedError('_get_host_placement_all not implemented')", "def nsi_cross_global_clustering(self, node_list1, node_list2):\n nsi_cc = self.nsi_cross_local_clustering(node_list1, node_list2)\n node_weights = self.node_weights[node_list1]\n return sum(node_weights * nsi_cc) / sum(node_weights)", "def global_index( self , active_index = None, ijk = None):\n return self.__global_index( active_index = active_index , ijk = ijk )", "def get_pulling_indices(self, weight):\n pass", "def all_gather(data):\n world_size = dist.get_world_size()\n if world_size == 1:\n return [data]\n\n buffer = pickle.dumps(data) #write data into Bytes and stores in buffer\n np_buffer = np.frombuffer(buffer, dtype=np.int8)\n tensor = paddle.to_tensor(np_buffer, dtype='int32') # uint8 doese not have many ops in paddle\n\n # obtain Tensor size of each rank\n local_size = paddle.to_tensor([tensor.shape[0]])\n size_list = []\n dist.all_gather(size_list, local_size)\n max_size = max(size_list)\n\n # receiving tensors from all ranks, \n # all_gather does not support different shape, so we use padding\n tensor_list = []\n if local_size != max_size:\n padding = paddle.empty(shape=(max_size - local_size, ), dtype='int32')\n tensor = paddle.concat((tensor, padding), axis=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.astype('uint8').cpu().numpy().tobytes()[:size]\n data_list.append(pickle.loads(buffer))\n\n return data_list", "def construct_mpi_topology(self, dico):\n period = [True]*self.dim\n\n if dico is None:\n comm = mpi.COMM_WORLD\n else:\n comm = dico.get('comm', mpi.COMM_WORLD)\n self.mpi_topo = MPI_topology(self.dim, period, comm)", "def findSpecificIndex(cluster_size, cluster_idx, tracked_clusters, index_tracked_clusters, indi):\n \n # Calculate the largest cluster\n maxi = max(cluster_size)\n \n # Index the largest cluster\n for i, cs in enumerate(cluster_size):\n if cs == maxi:\n index_tracked_clusters[indi] = i \n tracked_clusters[indi] = cluster_idx[i] \n break", "def distribute_fields_dofs(fields, cell_tasks, is_overlap=True,\n use_expand_dofs=False, save_inter_regions=False,\n output_dir=None, comm=None, verbose=False):\n if comm is None:\n comm = PETSc.COMM_WORLD\n\n size = comm.size\n\n if comm.rank == 0:\n gfds = []\n inter_facets = get_inter_facets(fields[0].domain, cell_tasks)\n for field in fields:\n aux = create_task_dof_maps(field, cell_tasks, inter_facets,\n is_overlap=is_overlap,\n 
use_expand_dofs=use_expand_dofs,\n save_inter_regions=save_inter_regions,\n output_dir=output_dir)\n cell_parts = aux[2]\n n_cell_parts = [len(ii) for ii in cell_parts]\n output('numbers of cells in tasks (without overlaps):',\n n_cell_parts, verbose=verbose)\n assert_(sum(n_cell_parts) == field.domain.mesh.n_el)\n assert_(nm.all(nm.array(n_cell_parts) > 0))\n\n gfd = Struct(name='global field %s distribution' % field.name,\n dof_maps=aux[0], id_map=aux[1],\n cell_parts=aux[2], overlap_cells=aux[3],\n coffsets=nm.empty(size, dtype=nm.int32))\n gfds.append(gfd)\n\n # Initialize composite offsets of DOFs.\n if len(fields) > 1:\n # Renumber id_maps for field inter-leaving.\n offset = 0\n for ir in range(size):\n for ii, gfd in enumerate(gfds):\n dof_map = gfd.dof_maps[ir]\n n_owned = dof_map[3]\n off = dof_map[4]\n\n iown = nm.concatenate([dof_map[0]] + dof_map[1])\n gfd.id_map[iown] += offset - off\n gfd.coffsets[ir] = offset\n\n offset += n_owned\n\n else:\n gfd = gfds[0]\n gfd.coffsets[:] = [gfd.dof_maps[ir][4] for ir in range(size)]\n\n else:\n gfds = [None] * len(fields)\n\n lfds = []\n for ii, field in enumerate(fields):\n aux = distribute_field_dofs(field, gfds[ii],\n use_expand_dofs=use_expand_dofs,\n comm=comm, verbose=verbose)\n lfd = Struct(name='local field %s distribution' % field.name,\n cells=aux[0], petsc_dofs_range=aux[1],\n petsc_dofs_conn=aux[2])\n lfds.append(lfd)\n\n return lfds, gfds", "def demo_select_indexes_dynamic_dimensions():\n\n ps = tf.placeholder(tf.float32, [None, 2])\n idxs = tf.placeholder(tf.int32, [None])\n\n y = tf.gather_nd(\n ps,\n tf.transpose(tf.stack([tf.range(tf.shape(idxs)[0]), idxs])))\n\n with tf.Session('') as sess:\n print(sess.run(y, feed_dict={\n ps: [[0.2, 0.8],\n [0.4, 0.6],\n [0.25, 0.75]],\n idxs: [1, 0, 1]\n }))\n print(sess.run(y, feed_dict={\n ps: [[0.2, 0.8],\n [0.4, 0.6],\n [0.4, 0.6],\n [0.4, 0.6],\n [0.25, 0.75]],\n idxs: [1, 0, 0, 1, 1]\n }))", "def compute_node_dissociation_index(community_vect, sparse_mat):\n dense_mat = sparse_mat.todense()\n undir_dense_mat = dense_mat + np.transpose(dense_mat)\n bin_dense_mat = np.array(undir_dense_mat != 0, dtype=int)\n\n degree_vect = np.array(np.sum(bin_dense_mat != 0, axis=1), dtype='float')\n community_indexes = np.unique(community_vect)\n\n ndi_values = np.ones(\n shape=(community_vect.shape[0]), dtype='float')\n\n for i in range(ndi_values.shape[0]):\n\n \"\"\"\n same_com = np.where(\n np.array([community_vect[i] == community_vect[j] for j in\n range(ndi_values.shape[0])],dtype = \"bool\") == True)\n\n print (same_com[0])\n\n val = 1.0-(np.sum(bin_dense_mat[i,\n same_com[0]])/float(degree_vect[i]))\n \"\"\"\n\n val2 = 0.0\n\n for j in community_indexes:\n if j != -1 and j != community_vect[i]:\n val2 += np.sum(bin_dense_mat[i, community_vect == j])\n\n if degree_vect[i]:\n ndi_values[i] = val2/float(degree_vect[i])\n\n else:\n print(\"Warning, degree is null for node {}\".format(i))\n print(bin_dense_mat)\n\n ndi_values[i] = 0.0\n\n return ndi_values", "def base_idx_neighbor_idx_simplices(n_base, n_neighbors=5, n_dim=2):\n combinations = np.array(list(itertools.combinations(np.arange(1,\n n_neighbors),\n n_dim-1))).astype(int)\n base_indices = np.repeat(np.arange(n_base), len(combinations))\n all_simplices = np.vstack([base_indices,\n np.tile(combinations, (n_base, 1)).T]).T\n #print('simplices', os.getpid(), len(all_simplices), flush=True)\n return all_simplices", "def create_global_index(self):\n\n LOGGER.debug(\"Indexing ...\")\n errors = []\n err = self.index_team_keys()\n if err is 
not None:\n errors.append(err)\n\n err = self.index_player_keys()\n if err is not None:\n errors.append(err)\n\n err_len = len(errors)\n\n if err_len == 0:\n LOGGER.debug(\"Indexing finished.\")\n return None\n elif err_len > 0:\n LOGGER.error(f\"Indexing finished with {err_len} error(s).\")\n return errors", "def _create_gather(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n node.input.append(op.name + \":indices\")\n return node", "def indices_and_currents_TSC_2D( charge_electron, positions_x, positions_y, velocity_x, velocity_y,\\\n x_grid, y_grid, ghost_cells, length_domain_x, length_domain_y, dt ):\n \n \n positions_x_new = positions_x + velocity_x * dt\n positions_y_new = positions_y + velocity_y * dt\n\n base_indices_x = af.data.constant(0, positions_x.elements(), dtype=af.Dtype.u32)\n base_indices_y = af.data.constant(0, positions_x.elements(), dtype=af.Dtype.u32)\n\n dx = af.sum(x_grid[1] - x_grid[0])\n dy = af.sum(y_grid[1] - y_grid[0])\n\n\n # Computing S0_x and S0_y\n ###########################################################################################\n \n # Determining the grid cells containing the respective particles\n \n x_zone = (((af.abs(positions_x - af.sum(x_grid[0])))/dx).as_type(af.Dtype.u32))\n y_zone = (((af.abs(positions_y - af.sum(y_grid[0])))/dy).as_type(af.Dtype.u32))\n\n \n # Determing the indices of the closest grid node in x direction\n\n temp = af.where(af.abs(positions_x-x_grid[x_zone]) < \\\n af.abs(positions_x-x_grid[x_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_x[temp] = x_zone[temp]\n\n temp = af.where(af.abs(positions_x - x_grid[x_zone]) >= \\\n af.abs(positions_x-x_grid[x_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_x[temp] = (x_zone[temp] + 1).as_type(af.Dtype.u32) \n\n\n # Determing the indices of the closest grid node in y direction\n\n temp = af.where(af.abs(positions_y-y_grid[y_zone]) < \\\n af.abs(positions_y-y_grid[y_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_y[temp] = y_zone[temp]\n\n temp = af.where(af.abs(positions_y - y_grid[y_zone])>=af.abs(positions_y-x_grid[y_zone + 1]))\n\n if(temp.elements()>0):\n base_indices_y[temp] = (y_zone[temp] + 1).as_type(af.Dtype.u32) \n\n # Concatenating the index list for near by grid nodes in x direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n\n base_indices_minus_two = (base_indices_x - 2).as_type(af.Dtype.u32) \n base_indices_minus = (base_indices_x - 1).as_type(af.Dtype.u32) \n base_indices_plus = (base_indices_x + 1).as_type(af.Dtype.u32) \n base_indices_plus_two = (base_indices_x + 2).as_type(af.Dtype.u32) \n\n\n\n index_list_x = af.join( 1,\\\n af.join(1, base_indices_minus_two, base_indices_minus, base_indices_x),\\\n af.join(1, base_indices_plus, base_indices_plus_two),\\\n )\n\n\n\n # Concatenating the index list for near by grid nodes in y direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n \n base_indices_minus_two = (base_indices_y - 2).as_type(af.Dtype.u32) \n base_indices_minus = (base_indices_y - 1).as_type(af.Dtype.u32) \n base_indices_plus = (base_indices_y + 1).as_type(af.Dtype.u32) \n base_indices_plus_two = (base_indices_y + 2).as_type(af.Dtype.u32) \n\n\n index_list_y = af.join( 1,\\\n af.join(1, base_indices_minus_two, base_indices_minus, base_indices_y),\\\n af.join(1, base_indices_plus, base_indices_plus_two),\\\n )\n\n # Concatenating the positions_x for determining weights for near by grid 
nodes in y direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n\n positions_x_5x = af.join( 0,\\\n af.join(0, positions_x, positions_x, positions_x),\\\n af.join(0, positions_x, positions_x),\\\n )\n\n positions_y_5x = af.join( 0,\\\n af.join(0, positions_y, positions_y, positions_y),\\\n af.join(0, positions_y, positions_y),\\\n )\n\n\n\n\n # Determining S0 for positions at t = n * dt\n\n\n distance_nodes_x = x_grid[af.flat(index_list_x)]\n\n distance_nodes_y = y_grid[af.flat(index_list_y)]\n\n\n W_x = 0 * distance_nodes_x.copy()\n W_y = 0 * distance_nodes_y.copy()\n\n\n # Determining weights in x direction\n\n temp = af.where(af.abs(distance_nodes_x - positions_x_5x) < (0.5*dx) )\n\n if(temp.elements()>0):\n W_x[temp] = 0.75 - (af.abs(distance_nodes_x[temp] - positions_x_5x[temp])/dx)**2\n\n temp = af.where((af.abs(distance_nodes_x - positions_x_5x) >= (0.5*dx) )\\\n * (af.abs(distance_nodes_x - positions_x_5x) < (1.5 * dx) )\\\n )\n\n if(temp.elements()>0):\n W_x[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_x[temp] - positions_x_5x[temp])/dx))**2\n\n\n\n # Determining weights in y direction\n\n temp = af.where(af.abs(distance_nodes_y - positions_y_5x) < (0.5*dy) )\n\n if(temp.elements()>0):\n W_y[temp] = 0.75 - (af.abs(distance_nodes_y[temp] - positions_y_5x[temp])/dy)**2\n\n temp = af.where((af.abs(distance_nodes_y - positions_y_5x) >= (0.5*dy) )\\\n * (af.abs(distance_nodes_y - positions_y_5x) < (1.5 * dy) )\\\n )\n\n if(temp.elements()>0):\n W_y[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_y[temp] - positions_y_5x[temp])/dy))**2\n\n # Restructering W_x and W_y for visualization and ease of understanding\n\n W_x = af.data.moddims(W_x, positions_x.elements(), 5)\n W_y = af.data.moddims(W_y, positions_y.elements(), 5)\n\n # Tiling the S0_x and S0_y for the 25 indices around the particle\n \n S0_x = af.tile(W_x, 1, 1, 5)\n S0_y = af.tile(W_y, 1, 1, 5)\n\n\n S0_y = af.reorder(S0_y, 0, 2, 1)\n\n\n\n #Computing S1_x and S1_y\n ###########################################################################################\n\n positions_x_5x_new = af.join( 0,\\\n af.join(0, positions_x_new, positions_x_new, positions_x_new),\\\n af.join(0, positions_x_new, positions_x_new),\\\n )\n\n positions_y_5x_new = af.join( 0,\\\n af.join(0, positions_y_new, positions_y_new, positions_y_new),\\\n af.join(0, positions_y_new, positions_y_new),\\\n )\n\n\n\n\n # Determining S0 for positions at t = n * dt\n\n W_x = 0 * distance_nodes_x.copy()\n W_y = 0 * distance_nodes_y.copy()\n\n\n # Determining weights in x direction\n\n temp = af.where(af.abs(distance_nodes_x - positions_x_5x_new) < (0.5*dx) )\n\n if(temp.elements()>0):\n W_x[temp] = 0.75 - (af.abs(distance_nodes_x[temp] - positions_x_5x_new[temp])/dx)**2\n\n temp = af.where((af.abs(distance_nodes_x - positions_x_5x_new) >= (0.5*dx) )\\\n * (af.abs(distance_nodes_x - positions_x_5x_new) < (1.5 * dx) )\\\n )\n\n if(temp.elements()>0):\n W_x[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_x[temp] \\\n - positions_x_5x_new[temp])/dx\\\n )\\\n )**2\n\n\n\n # Determining weights in y direction\n\n temp = af.where(af.abs(distance_nodes_y - positions_y_5x_new) < (0.5*dy) )\n\n if(temp.elements()>0):\n W_y[temp] = 0.75 - (af.abs(distance_nodes_y[temp] \\\n - positions_y_5x_new[temp]\\\n )/dy\\\n )**2\n\n temp = af.where((af.abs(distance_nodes_y - positions_y_5x_new) >= (0.5*dy) )\\\n * (af.abs(distance_nodes_y - positions_y_5x_new) < (1.5 * dy) )\\\n )\n\n if(temp.elements()>0):\n W_y[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_y[temp] \\\n - 
positions_y_5x_new[temp])/dy\\\n )\\\n )**2\n\n # Restructering W_x and W_y for visualization and ease of understanding\n\n W_x = af.data.moddims(W_x, positions_x.elements(), 5)\n W_y = af.data.moddims(W_y, positions_x.elements(), 5)\n\n # Tiling the S0_x and S0_y for the 25 indices around the particle \n \n S1_x = af.tile(W_x, 1, 1, 5)\n S1_y = af.tile(W_y, 1, 1, 5)\n\n S1_y = af.reorder(S1_y, 0, 2, 1)\n\n\n ###########################################################################################\n\n # Determining the final weight matrix for currents in 3D matrix form factor\n\n\n W_x = (S1_x - S0_x) * (S0_y + (0.5 *(S1_y - S0_y)) )\n\n\n W_y = (S1_y - S0_y) * (S0_x + (0.5 *(S1_x - S0_x)) )\n\n\n ###########################################################################################\n\n\n # Assigning Jx and Jy according to Esirkepov's scheme\n\n Jx = af.data.constant(0, positions_x.elements(), 5, 5, dtype = af.Dtype.f64)\n Jy = af.data.constant(0, positions_x.elements(), 5, 5, dtype = af.Dtype.f64)\n\n\n Jx[:, 0, :] = -1 * charge_electron * (dx/dt) * W_x[:, 0, :].copy()\n Jx[:, 1, :] = Jx[:, 0, :] + -1 * charge_electron * (dx/dt) * W_x[:, 1, :].copy()\n Jx[:, 2, :] = Jx[:, 1, :] + -1 * charge_electron * (dx/dt) * W_x[:, 2, :].copy()\n Jx[:, 3, :] = Jx[:, 2, :] + -1 * charge_electron * (dx/dt) * W_x[:, 3, :].copy()\n Jx[:, 4, :] = Jx[:, 3, :] + -1 * charge_electron * (dx/dt) * W_x[:, 4, :].copy()\n \n # Computing current density using currents\n \n Jx = (1/(dx * dy)) * Jx\n\n\n Jy[:, :, 0] = -1 * charge_electron * (dy/dt) * W_y[:, :, 0].copy()\n Jy[:, :, 1] = Jy[:, :, 0] + -1 * charge_electron * (dy/dt) * W_y[:, :, 1].copy()\n Jy[:, :, 2] = Jy[:, :, 1] + -1 * charge_electron * (dy/dt) * W_y[:, :, 2].copy()\n Jy[:, :, 3] = Jy[:, :, 2] + -1 * charge_electron * (dy/dt) * W_y[:, :, 3].copy()\n Jy[:, :, 4] = Jy[:, :, 3] + -1 * charge_electron * (dy/dt) * W_y[:, :, 4].copy()\n \n # Computing current density using currents\n\n Jy = (1/(dx * dy)) * Jy\n\n # Preparing the final index and current vectors\n ###########################################################################################\n \n \n # Determining the x indices for charge deposition\n index_list_x_Jx = af.flat(af.tile(index_list_x, 1, 1, 5))\n\n # Determining the y indices for charge deposition\n y_current_zone = af.tile(index_list_y, 1, 1, 5)\n index_list_y_Jx = af.flat(af.reorder(y_current_zone, 0, 2, 1))\n\n\n currents_Jx = af.flat(Jx)\n\n # Determining the x indices for charge deposition\n index_list_x_Jy = af.flat(af.tile(index_list_x, 1, 1, 5))\n\n # Determining the y indices for charge deposition\n y_current_zone = af.tile(index_list_y, 1, 1, 5)\n index_list_y_Jy = af.flat(af.reorder(y_current_zone, 0, 2, 1))\n \n # Flattenning the Currents array\n currents_Jy = af.flat(Jy)\n\n af.eval(index_list_x_Jx, index_list_y_Jx)\n af.eval(index_list_x_Jy, index_list_y_Jy)\n af.eval(currents_Jx, currents_Jy)\n\n\n return index_list_x_Jx, index_list_y_Jx, currents_Jx,\\\n index_list_x_Jy, index_list_y_Jy, currents_Jy", "def calc_gradu_gradv_p1_partly(topo,x,y):\n ndofs = max(x.shape)\n\n (rows,cols)= la_utils.get_sparsity_pattern(topo)\n\n values = np.zeros(rows.shape)\n\n for row in topo:\n x_l = x[row]\n y_l = y[row]\n eval_points = np.zeros((0,2))\n\n (phi_dx,phi_dy,phi,omega) = basis.tri_p1(x_l,y_l,eval_points)\n dx_j = phi_dx\n dx_i = phi_dx.transpose()\n dy_j = phi_dy\n dy_i = phi_dy.transpose()\n local_matrix = omega*(np.dot(dx_i,dx_j)+np.dot(dy_i,dy_j))\n values = la_utils.add_local_to_global_coo(rows,cols,values,\n 
row,row,local_matrix)\n\n A = sparse.coo_matrix((values,(rows,cols)),shape=(ndofs,ndofs))\n #plt.spy(A)\n #plt.show()\n A.tocsr()\n\n return A", "def local_to_global(local_index):\n return global_index.value.get(tokens.value[local_index], -1)", "def dc_nodes(path_dict, num_node, place_type, topo_type):\n \n if topo_type == \"ring\":\n # ring topology\n #node_x = random.choice(range(1, num_node + 1))\n node_x = 1\n if place_type == 'a':\n node_y = (node_x + 1) % num_node\n elif place_type == 'f':\n if num_node % 2 == 1: # odd number of nodes\n node_y = (node_x + (num_node + 1) / 2) % num_node\n else:\n node_y = (node_x + num_node / 2) % num_node\n if node_y == 0:\n node_y = num_node\n if topo_type == \"grid\":\n # grid topology: m-by-m\n top, bottom, left, right = grid_edges(num_node)\n corner = grid_corner(num_node)\n h_edges = deepcopy(top)\n v_edges = deepcopy(left)\n h_edges.extend(bottom)\n v_edges.extend(right)\n #side = set().union(set(top), set(bottom), set(left), set(right))\n \n if place_type == 's':\n #side_nonc = side.difference(set(corner))\n #node_x = random.choice(list(side_nonc))\n #node_x = random.choice(top)\n node_x = 2\n if node_x in h_edges and node_x not in corner:\n node_y = node_x + 1\n elif node_x in v_edges and node_x not in corner:\n node_y = int(node_x + math.sqrt(num_node))\n else:\n pass\n print \"Data Center Nodes\", node_x, node_y\n if place_type == 'c':\n #node_x = random.choice(corner)\n node_x = 1\n node_y = num_node + 1 - node_x\n if place_type == 'a':\n # two data centers are adjacent in the center\n if num_node % 2 == 1:\n # odd number of nodes\n node_x = int((1 + num_node) / 2)\n else:\n # even number of nodes\n node_x = int((num_node - math.sqrt(num_node)) / 2)\n node_y = node_x + 1 # inside the grid\n print \"Data Center Nodes\", node_x, node_y \n return (node_x, node_y)", "def local_gpu_dimshuffle_0(node):\r\n if isinstance(node.op, tensor.DimShuffle):\r\n input, = node.inputs\r\n if input.owner and isinstance(input.owner.op, HostFromGpu):\r\n # move the add to a GpuAdd\r\n new_op = GpuDimShuffle(node.op.input_broadcastable,\r\n node.op.new_order)\r\n return [host_from_gpu(new_op(gpu_from_host(input)))]\r\n if isinstance(node.op, GpuFromHost):\r\n host_input = node.inputs[0]\r\n if host_input.owner and isinstance(host_input.owner.op,\r\n tensor.DimShuffle):\r\n dimshuffle_node = host_input.owner\r\n new_op = GpuDimShuffle(dimshuffle_node.op.input_broadcastable,\r\n dimshuffle_node.op.new_order)\r\n return [new_op(gpu_from_host(dimshuffle_node.inputs[0]))]\r\n return False", "def get_analytically_computed_optimization_parameter_indices(self):\n indices = []\n if '/offsetParameterIndices' in self.f:\n indices.extend(self.f['/offsetParameterIndices'])\n\n if '/scalingParameterIndices' in self.f:\n indices.extend(self.f['/scalingParameterIndices'])\n\n if '/sigmaParameterIndices' in self.f:\n indices.extend(self.f['/sigmaParameterIndices'])\n\n return list(set(indices))", "def test_local_useless_inc_subtensor_no_opt():\n x = matrix(\"x\")\n y = matrix(\"y\")\n\n s = x[:, ::2]\n o_shape = set_subtensor(s, specify_shape(y, s.shape))\n\n mode = get_default_mode().including(\"local_useless_inc_subtensor\")\n f_shape = function([x, y], o_shape, mode=mode)\n\n topo = f_shape.maker.fgraph.toposort()\n assert any(isinstance(n.op, IncSubtensor) for n in topo)\n\n out = f_shape([[2, 3, 6, 7]], [[8, 9]])\n assert np.array_equal(out, np.asarray([[8, 3, 9, 7]]))\n\n # This is an increment with a non-constant target array\n s = x[:, :]\n o_shape = inc_subtensor(s, 
specify_shape(y, s.shape))\n\n f_shape = function([x, y], o_shape, mode=mode)\n\n topo = f_shape.maker.fgraph.toposort()\n assert any(isinstance(n.op, IncSubtensor) for n in topo)\n\n # This is an increment with a non-zero target array\n s = at.ones((2, 2))[:, :]\n o_shape = inc_subtensor(s, specify_shape(y, s.shape))\n\n f_shape = function([y], o_shape, mode=mode)\n\n topo = f_shape.maker.fgraph.toposort()\n assert any(isinstance(n.op, IncSubtensor) for n in topo)", "def _getCoordsets(self, indices=None):\n\n if self._coords is None:\n return None\n\n try:\n return self._coords if indices is None else self._coords[indices]\n except:\n raise IndexError('indices must be an integer, a list/array of '\n 'integers, a slice, or None')", "def get_local_ordering(field_i, petsc_dofs_conn, use_expand_dofs=False):\n if use_expand_dofs:\n petsc_dofs = nm.empty(field_i.n_nod * field_i.n_components,\n dtype=nm.int32)\n\n else:\n petsc_dofs = nm.empty(field_i.n_nod, dtype=nm.int32)\n econn = field_i.econn\n if use_expand_dofs:\n econn = expand_dofs(econn, field_i.n_components)\n petsc_dofs[econn] = petsc_dofs_conn\n\n return petsc_dofs", "def get_allowed_topologies(user):\n try:\n up = user.get_profile()\n except AttributeError:\n return db.Topology.objects.none()\n\n if user.has_perm(\"vnswww.topology_use_any\"):\n # We can view and use any templates\n topos = db.Topology.objects.filter()\n else:\n q_own = Q(owner=user)\n q_permitted = Q(allowed_users=user)\n q_org = Q(org=user.get_profile().org)\n q_public = Q(public=True)\n if user.has_perm(\"vnswww.topology_use_org\"):\n print \"Allowed all topos in own org\"\n # We can view and use any from the user's organization\n topos = db.Topology.objects.filter(q_permitted | q_org | q_own)\n else:\n print \"NOT allowed all topos in own org\"\n # We can view any from our own organization which are protected\n topos = db.Topology.objects.filter(q_permitted | q_own)\n\n return topos", "def _Dynamic_GetIndices(self, app_str, composite_indices, request_id=None):\n self._RemoteSend(app_str, composite_indices, \"GetIndices\", request_id)\n return composite_indices" ]
[ "0.78130454", "0.55268973", "0.5226445", "0.51116353", "0.50954676", "0.48668343", "0.4836725", "0.47469142", "0.4703786", "0.4642443", "0.45836034", "0.45445976", "0.45384085", "0.45181572", "0.44909367", "0.4485813", "0.4471921", "0.44507834", "0.4396532", "0.4384738", "0.43786052", "0.43531397", "0.43459877", "0.43445668", "0.43338853", "0.43320948", "0.43223488", "0.43058595", "0.43007943", "0.428299", "0.4270128", "0.42692563", "0.42632246", "0.4248366", "0.42417926", "0.42400268", "0.42152652", "0.42042997", "0.4203478", "0.41946328", "0.41928077", "0.4181062", "0.4174966", "0.41730064", "0.41723782", "0.4169507", "0.41637972", "0.4162518", "0.4161588", "0.41576642", "0.41520974", "0.41490707", "0.41437572", "0.4140218", "0.41345632", "0.41272554", "0.41235763", "0.41013578", "0.40960386", "0.40845978", "0.40796107", "0.40735933", "0.4070606", "0.40692648", "0.40669197", "0.4063516", "0.40545586", "0.40538573", "0.40535092", "0.40500355", "0.4039221", "0.403782", "0.40121183", "0.40120006", "0.40025258", "0.40024054", "0.39990562", "0.3997721", "0.3984169", "0.39778054", "0.397188", "0.3966266", "0.39646623", "0.39643046", "0.3962462", "0.39624086", "0.39592394", "0.39431748", "0.39368743", "0.3936012", "0.3931767", "0.39297622", "0.39264214", "0.392395", "0.39234212", "0.39188603", "0.39168203", "0.3915141", "0.39131337", "0.39118755" ]
0.7679277
1
Return True if all MPI processes of child belong to parent
def is_parent(child, parent):
    # Get the list of processes
    assert child is not None
    assert parent is not None
    #child_ranks = [i for i in xrange(child.Get_size())]
    child_group = child.Get_group()
    parent_group = parent.Get_group()
    inter_group = MPI.Group.Intersect(child_group, parent_group)
    return child_group.Get_size() == inter_group.Get_size()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_mpi_peer_processes():\n return mpi4py_available and MPI.COMM_WORLD.Get_size() > 1", "def contains_parent(self, pid):\n return pid in self._parent_ids", "def check_parent_processes_alive():\n cur_process = psutil.Process()\n parent = cur_process.parent()\n while True:\n time.sleep(1)\n if not parent.is_running():\n break\n\n logger.warning(\"Parent process is terminated abnormally. Process exits.\")\n cur_process.kill()", "def is_multigpu_child_process():\n return (dist.is_initialized() or \"TORCHELASTIC_RUN_ID\" in os.environ) and os.environ[\"LOCAL_RANK\"] != \"0\"", "def i_am_root():\n try:\n return True if mpi_rank() == 0 else False\n except AttributeError:\n # not running MPI\n return True", "def is_known(self, child):\r\n return child in self._parents", "def __contains__(self, pid):\n return self.contains_child(pid) or self.contains_parent(pid)", "def contains_child(self, pid):\n return pid in self._children_ids", "def pure_mpi(self):\n return self.has_mpi and not self.has_omp", "def is_child_of(self, *args):\n return _ida_hexrays.cexpr_t_is_child_of(self, *args)", "def islchild(self):\n\t\tif (self.parent() and self.parent().lchild() is self): #TODO is or == here\n\t\t\treturn True\n\t\treturn False", "def am_i_root():\n if WITH_MPI:\n return not ME\n else:\n return os.getpid() == MASTER_PID", "def has_mpi(self):\n return bool(self.mpi_runner)", "def hasChildren():", "def true_partition(self):\n if 'NA' in self.mothers or 'NA' in self.fathers:\n warn('Warning: one or more individuals has at least one parent of unkown identity.')\n warn('All such individuals will be assigned to the same sibship group.')\n\n # concatenate mother and father names to create a vector of parent pairs.\n #parentage = np.array([str(self.mothers[o]) + '/' + str(self.fathers[o]) for o in range(noffs)])\n possible_families = np.unique(self.parents) # get a list of all unique parent pairs\n\n partitions = np.zeros(self.size).astype('int') # empty vector of zeros.\n for o in range(self.nfamilies):\n # for every possible family label individuals with an identical integer.\n partitions[self.parents == possible_families[o]] += o\n\n return partitions", "def is_state_a_child(child: State, parent: State) -> bool:\n if child.x >= parent.x and child.y >= parent.y and child.x <= parent.x + parent.width and child.y<=parent.y+parent.height:\n return True\n return False", "def is_mpi_env():\n try:\n import mpi4py\n except ImportError:\n return False\n\n try:\n import mpi4py.MPI\n except ImportError:\n return False\n\n if mpi4py.MPI.COMM_WORLD.size == 1 and mpi4py.MPI.COMM_WORLD.rank == 0:\n return False\n return True", "def _isSubProcessRunning(self): \n # Check if child process has terminated. 
Set and return returncode attribute.\n if self.__process.poll() is None:\n return True\n else:\n return False", "def can_add_child(self, child):\n if not self.is_valid_child(child):\n return False\n if child.isa == u'PBXGroup':\n return len(func.take(\\\n lambda c: c.pbx_name == child.pbx_name and c.realpath() == child.realpath(),\\\n self.pbx_children)) == 0\n else:\n return len(func.take(lambda c:c.realpath() == child.realpath(), self.pbx_children)) == 0", "def has_parents(self):\n return len(self._parents) > 0", "def can_use_mpi_pool():\n return ALLOW_SPAWN or ALREADY_RUNNING_AS_MPI", "def _refers_to_parent_table(self) -> bool:\n pt = self.parent_persist_selectable\n mt = self.child_persist_selectable\n result = False\n\n def visit_binary(binary: BinaryExpression[Any]) -> None:\n nonlocal result\n c, f = binary.left, binary.right\n if (\n isinstance(c, expression.ColumnClause)\n and isinstance(f, expression.ColumnClause)\n and pt.is_derived_from(c.table)\n and pt.is_derived_from(f.table)\n and mt.is_derived_from(c.table)\n and mt.is_derived_from(f.table)\n ):\n result = True\n\n visitors.traverse(self.primaryjoin, {}, {\"binary\": visit_binary})\n return result", "def any_parent_has_power(self, member_name):\n for parent in self.parents_of(member_name):\n if parent.has_power:\n return True\n \n return False", "def is_parent(self):\n return not self.children", "def is_consistent_with(self, target):\n same_parent = self.parent() == target.parent()\n # Note FP. Is it really required to have the\n # same parent? Inclusion of all proc may be enough?\n return npw.equal(self.shape, target.shape).all() and same_parent", "def is_parent(self, item):\n if len(self.df.loc[self.df['parent_code']==item, :]): \n return True\n return False", "def is_parent(self):\n if self.parent is not None:\n return False\n return True", "def hybrid_mpi_omp(self):\n return self.has_omp and self.has_mpi", "def has_parent(self, index):\n return self.get_parent_index(index) < len(self.heap)", "def hasparents(self):\n return bool(self.parents)", "def is_island(self):\n return bool(not self.children.exists() and not self.parents.exists())", "def initialize_mpi(self):\n return False", "def has_parent(self):\n return False", "def is_state_a_child_by_coord(x, y, width, height, parent: State) -> bool:\n if x+1 >= parent.x and y+1 >= parent.y and x + width - 1 <= parent.x + parent.width:\n if y + height - 1 <= parent.y + parent.height:\n return True\n return False", "def has_parent(self):\n return self.parent != None", "def has_nodes_for_process(self, uuid, clean=True):\n return self.get_nodes_for_process(uuid, clean).exists()", "def can_communicate_with(self, target):\n if self == target:\n return True\n msg = 'You try to connect topologies belonging to'\n msg += ' two different mpi tasks. 
Set taskids properly or use'\n msg += ' InterBridge.'\n assert self.task_id() == target.task_id(), msg\n\n # Parent communicator\n # Todo : define some proper conditions for compatibility\n # between topo_from, topo_to and parent:\n # - same size\n # - same domain\n # - common processus ...\n # At the time we check that both topo have\n # the same comm_origin.\n return self.is_consistent_with(target)", "def is_parent_of(cls, *args):\n return cls.graph_traversal(None, None, Bytecode()).is_parent_of(*args)", "def is_found_in_parents(mcs, name, parents):\n for parent in parents:\n\n for cls in reversed(parent.__mro__):\n\n if hasattr(cls, name):\n return True\n\n if cls.__class__ is mcs:\n break\n\n return False", "def find_all_child_processes(pids_only=False):\n children = psutil.Process().children(recursive=True)\n return [child.pid for child in children] if pids_only else children", "def setChildPIDs(self):\n\t\tpids = []\n\t\tfor proc in process_iter():\n\t\t\tfor child in self.expectedChildren:\n\t\t\t\tif child == proc.name():\n\t\t\t\t\tif proc.parent().name() == \"Python\": # Hardcoded string comparison. Sue me.\n\t\t\t\t\t\tpids.append(proc.pid)\n\t\tself.pids = pids", "def race_condition():\n if len(allocated_pids) != len(set(allocated_pids)):\n return True\n else:\n return False", "def mpi_fork(n):\n if n<=1: \n return \"child\"\n if os.getenv(\"IN_MPI\") is None:\n env = os.environ.copy()\n env.update(\n MKL_NUM_THREADS=\"1\",\n OMP_NUM_THREADS=\"1\",\n IN_MPI=\"1\"\n )\n subprocess.check_call([\"mpirun\", \"-np\", str(n), sys.executable] + sys.argv, env=env)\n return \"parent\"\n else:\n return \"child\"", "def __contains__(self, item):\n if item in self._parents:\n return True\n else:\n return False", "def SBO_isPhysicalParticipant(*args):\n return _libsbml.SBO_isPhysicalParticipant(*args)", "def isPhysicalParticipant(*args):\n return _libsbml.SBO_isPhysicalParticipant(*args)", "def is_parent_of(self):\n return self.hasLabel('parent_of')", "def is_parent(self) -> bool:\n return AccountEntry.objects.filter(parent=self).exists()", "def check_dataset_children_ids(self):\n # Start with the set of pandas that have no children\n childless_ids = [p['_id'] for p in self.vertices\n if (p['children'] == \"none\" \n or p['children'] == \"unknown\")]\n # Finish with the pandas that have no recorded parents\n all_child_ids = [x['_out'] for x in self.edges]\n parentless_ids = [y for y in range(1, self.sum_pandas())\n if y not in all_child_ids]\n # Sets of edges we can start or finish on\n starting_edges = [s for s in self.edges \n if s['_out'] in childless_ids]\n finishing_edges = [f for f in self.edges\n if f['in'] in parentless_ids]\n # This is hard to write :)\n pass", "def has_parent(self):\n return self._parent_ is not None", "async def _check_parent():\n try:\n curr_proc = psutil.Process()\n parent_death_cnt = 0\n while True:\n parent = curr_proc.parent()\n # If the parent is dead, it is None.\n parent_gone = parent is None\n init_assigned_for_parent = False\n parent_changed = False\n\n if parent:\n # Sometimes, the parent is changed to the `init` process.\n # In this case, the parent.pid is 1.\n init_assigned_for_parent = parent.pid == 1\n # Sometimes, the parent is dead, and the pid is reused\n # by other processes. In this case, this condition is triggered.\n parent_changed = self.ppid != parent.pid\n\n if parent_gone or init_assigned_for_parent or parent_changed:\n parent_death_cnt += 1\n logger.warning(\n f\"Raylet is considered dead {parent_death_cnt} X. 
\"\n f\"If it reaches to {_PARENT_DEATH_THREASHOLD}, the agent \"\n f\"will kill itself. Parent: {parent}, \"\n f\"parent_gone: {parent_gone}, \"\n f\"init_assigned_for_parent: {init_assigned_for_parent}, \"\n f\"parent_changed: {parent_changed}.\"\n )\n if parent_death_cnt < _PARENT_DEATH_THREASHOLD:\n await asyncio.sleep(\n dashboard_consts.DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_S\n )\n continue\n\n log_path = os.path.join(self.log_dir, \"raylet.out\")\n error = False\n msg = f\"Raylet is terminated: ip={self.ip}, id={self.node_id}. \"\n try:\n with open(log_path, \"r\", encoding=\"utf-8\") as f:\n # Seek to _RAYLET_LOG_MAX_TAIL_SIZE from the end if the\n # file is larger than that.\n f.seek(0, io.SEEK_END)\n pos = max(0, f.tell() - _RAYLET_LOG_MAX_TAIL_SIZE)\n f.seek(pos, io.SEEK_SET)\n # Read remaining logs by lines.\n raylet_logs = f.readlines()\n # Assume the SIGTERM message must exist within the last\n # _RAYLET_LOG_MAX_TAIL_SIZE of the log file.\n if any(\n \"Raylet received SIGTERM\" in line\n for line in raylet_logs\n ):\n msg += \"Termination is graceful.\"\n logger.info(msg)\n else:\n msg += (\n \"Termination is unexpected. Possible reasons \"\n \"include: (1) SIGKILL by the user or system \"\n \"OOM killer, (2) Invalid memory access from \"\n \"Raylet causing SIGSEGV or SIGBUS, \"\n \"(3) Other termination signals. \"\n f\"Last {_RAYLET_LOG_MAX_PUBLISH_LINES} lines \"\n \"of the Raylet logs:\\n\"\n )\n msg += \" \" + \" \".join(\n raylet_logs[-_RAYLET_LOG_MAX_PUBLISH_LINES:]\n )\n error = True\n except Exception as e:\n msg += f\"Failed to read Raylet logs at {log_path}: {e}!\"\n logger.exception(msg)\n error = True\n if error:\n logger.error(msg)\n # TODO: switch to async if necessary.\n ray._private.utils.publish_error_to_driver(\n ray_constants.RAYLET_DIED_ERROR,\n msg,\n gcs_publisher=ray._raylet.GcsPublisher(\n address=self.gcs_address\n ),\n )\n else:\n logger.info(msg)\n sys.exit(0)\n else:\n parent_death_cnt = 0\n await asyncio.sleep(\n dashboard_consts.DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_S\n )\n except Exception:\n logger.exception(\"Failed to check parent PID, exiting.\")\n sys.exit(1)", "def slaves_found(self):\n return not (len(self.topology) and self.topology[0][1] == [])", "def parents(self, asset_vid):\n return self \\\n .asset(asset_vid) \\\n .inE() \\\n .is_parent_of()", "def has_child(self):\n return False", "def is_parent(self, mother, kid):\n mom_node = self.names_to_nodes[mother]\n child_node = self.names_to_nodes[kid]\n return child_node.is_parent(mom_node)", "def mpi_procs(self):\n return self._mpi_procs", "def wait_for_parent():\n wait_for_pid(os.getppid())", "def orphaned(self):\n return (self.parent is None)", "def check_processes(process_list):\n running = 1 # 0 when the subprocesses are all done\n while running:\n for proc in process_list:\n proc.poll()\n if proc.returncode == 1:\n raise RuntimeError(\"Program \" +\n \"number \" +\n \"{}\".format(process_list.index(proc)) +\n \" failed.\")\n running = bool(sum([int(proc.returncode) for proc in process_list]))\n return True", "def _duplicate_child_allowed_check(self):\n\n for rule in self.options[\n 'parent_allows_duplicate_child']:\n if self.lineage_test(rule):\n return True\n return False", "def _has_parents(self, node: CFNode) -> bool:\n return bool(self._graph._backedges[node])", "def has_children(self):\n return False", "def is_child(self, kid, mother): \n mom_node = self.names_to_nodes[mother] \n child_node = self.names_to_nodes[kid]\n return mom_node.is_child(child_node)", "def 
test_PSA_ONLY_PROCESS(self):\n self.verify_references_to_prerequisites(processes.PSA_ONLY_PROCESS)", "def haschildren(self):\n return bool(self.children)", "def hasChildren(self):\n return self.childCount() > 0", "def getChildPIDs(self):\n\t\treturn self.pids", "def inside_itself(self):\n for i in range(2, len(self.nodes)):\n if self.nodes[0] == self.nodes[i]:\n return True\n return False", "def isRunning(self):\n if not self.hasBeenStarted():\n return False\n \n if not self._slave_dhcp_client_proc.poll(): # Poll our direct child (sudo)\n return False\n \n for pid in self._all_processes_pid:\n if not self._checkPid(pid):\n return False\n \n return True", "def mpi_fork(n, bind_to_core=False):\n if n<=1:\n return \"child\"\n if os.getenv(\"IN_MPI\") is None:\n env = os.environ.copy()\n env.update(\n MKL_NUM_THREADS=\"1\",\n OMP_NUM_THREADS=\"1\",\n IN_MPI=\"1\"\n )\n args = [\"mpirun\", \"-np\", str(n)]\n if bind_to_core:\n args += [\"-bind-to\", \"core\"]\n args += [sys.executable] + sys.argv\n subprocess.check_call(args, env=env)\n return \"parent\"\n else:\n return \"child\"", "def can_be_parent(self, give_reason=False):\n reason = None\n if self.is_child:\n reason = _(\"The specified parent product is a child product.\")\n if self.has_stockrecords:\n reason = _(\"One can't add a child product to a product with stock records.\")\n is_valid = reason is None\n if give_reason:\n return is_valid, reason\n else:\n return is_valid", "def is_master(self, process_group: ProcessGroup = None) -> bool:\n rank = dist.get_rank(group=process_group)\n return rank == 0", "def is_main_process(args: dict):\n\n return not is_distributed(args) or args.local_rank == 0", "def is_subhalo(self, childid, parentid):\n if (childid in self._halos[parentid].properties['children']):\n return True\n else:\n return False", "def all_enter(self):\n return self.num_enters == self.num_workers", "def _isthisapropertree(self):\n ok = True\n if self._leftchild:\n if self._leftchild._parent != self:\n ok = False\n if self._leftchild._isthisapropertree() == False:\n ok = False\n if self._rightchild:\n if self._rightchild._parent != self:\n ok = False\n if self._rightchild._isthisapropertree() == False:\n ok = False\n if self._parent:\n if (self._parent._leftchild != self\n and self._parent._rightchild != self):\n ok = False\n return ok", "def is_started(self):\n return bool(self._processes)", "def _isthisapropertree(self):\n ok = True\n if self._leftchild is not None:\n if self._leftchild._parent != self:\n ok = False\n if self._leftchild._isthisapropertree() is False:\n ok = False\n if self._rightchild is not None:\n if self._rightchild._parent != self:\n ok = False\n if self._rightchild._isthisapropertree() is False:\n ok = False\n if self._parent is not None:\n if self not in (self._parent._leftchild, self._parent._rightchild):\n ok = False\n return ok", "def is_alive(self):\n result = execute('ps -Ao pgid', check_pg_alive=False, stdout=PIPE)\n pgids = result['stdout'].decode('utf8').split()\n return str(self.process.pid) in pgids", "def is_proc_running(name):\n\n for p in psutil.process_iter(['name']):\n if p.info['name'] == name:\n return True\n\n return False", "def depends_on_process(self, process):\n for output_resource in process.iter_outputs():\n if self.has_input(output_resource):\n return True\n return False", "def haschild(self, child):\n return pbxhelper.pbxobj_has_pbxlist_value(self, u'pbx_children', child, \\\n self.is_valid_child)", "def _is_alive(self, pid):\n process = next(x for x in self._processes if x.pid 
== pid)\n return process.is_alive()", "def independent(self) -> bool:\n parent = self._parent()\n if parent is None:\n return True\n connections = parent._graph.connections\n path = self._path\n lp = len(path)\n for con in connections:\n if con[\"type\"] == \"connection\":\n if con[\"target\"][:lp] == path:\n return False\n return True", "def internal(self):\n if self._leftchild or self._rightchild:\n return True\n return False", "def is_process_running(name):\n if not hasattr(is_process_running, \"proc\"):\n is_process_running.proc = None # it doesn't exist yet, so init it\n\n if is_process_running.proc:\n if is_process_running.proc.is_running():\n return True\n else:\n is_process_running.proc = None\n return False\n else:\n for p in psutil.process_iter():\n if p.name() == name:\n is_process_running.proc = p\n return True\n #\n return False", "def has_children(self):\n\n pass", "def check_subprocesses(self) : \n for sp_ident in self.active_subprocesses :\n if not os.path.exists(\"%s/%s.rc\" % (self.spool_dir, sp_ident ) ) : continue\n self.finished_subprocesses[sp_ident] = self.get_subprocess_result(sp_ident)\n self.active_subprocesses.pop(sp_ident)", "def test_process_parent_id():\n output = sh.process_parent_id()\n assert isinstance(output, int) and output > 0", "def is_primary(self):\n\n return not self.parent.non_primary", "def checkUniqueChild(RelationShipList, child):\r\n if not RelationShipList:\r\n return 1\r\n for i in RelationShipList:\r\n if i[0] == child:\r\n return 0\r\n return 1", "def is_occupied(self, pos):\n return any([p == pos for p in self._workers.values()])", "def get_child_processes(self, ppid):\n\n all_children = []\n children_to_explore = set()\n for _pid in self.parent_to_children_map[ppid]:\n all_children.append(_pid)\n children_to_explore.add(_pid)\n\n # get the children 'recursively'\n while children_to_explore: # the invariant\n child_to_explore = children_to_explore.pop()\n if not self.parent_to_children_map.get(child_to_explore):\n continue\n unvisited = self.parent_to_children_map[child_to_explore]\n for node in unvisited:\n if node not in all_children:\n children_to_explore.add(node)\n all_children.append(node)\n return list(set(all_children))", "def is_running(proc_name:str) -> bool:\r\n with Popen(\"tasklist /NH /FO TABLE\", shell=False, stdout=PIPE) as proc:\r\n rprocs = proc.stdout.read().decode(\"utf-8\")\r\n plist = rprocs.split(\"\\r\\n\")\r\n return(any(i.lower().startswith(proc_name.lower()) for i in plist))", "def has_child(self, uid: str) -> bool:\n return uid in self._children_uids", "def any_parent_is_group(self):\n if self._any_parent_is_group is None:\n return super(SettingObject, self).any_parent_is_group\n return self._any_parent_is_group", "def hasSiblings():", "def isHandle(self):\n return self.type in mpi_handle_types", "def is_cousin(parent_db, A, B):\n parent_dict = {}\n for item in parent_db:\n if item[0] in parent_dict: #If parent is already in the dictionary, add this child to value (set of children)\n parent_dict[item[0]].add(item[1])\n else:\n parent_dict[item[0]] = {item[1]}\n\n child_dict = {}\n for item in parent_db:\n if item[1] in child_dict: #If child is already in the dictionary, add this parent to value (set of parents)\n child_dict[item[1]].add(item[0])\n else:\n child_dict[item[1]] = {item[0]}\n\n if A==B:\n return None\n\n for parent in parent_dict:\n if A in parent_dict[parent] and B in parent_dict[parent]: #Checking if they share the same parent\n return None\n\n grandparents_A = set()\n for parent in child_dict[A]: 
#Iterating through parents of A\n for grandparent in child_dict[parent]: #Iterating through parents of parents of A (grandparents of A)\n grandparents_A.add(grandparent)\n\n for parent in child_dict[B]: #Iterating through parents of B\n for grandparent in child_dict[parent]: #Iterating through parents of parents of B (grandparents of B)\n if grandparent in grandparents_A:\n return grandparent\n\n return None", "def has_children(self) -> bool:\n\n return False" ]
[ "0.6905076", "0.6545807", "0.6406559", "0.63527286", "0.6351458", "0.6243226", "0.62221795", "0.6163074", "0.6159543", "0.615525", "0.6027681", "0.5970372", "0.5956478", "0.5952117", "0.5878544", "0.58695066", "0.5852495", "0.579879", "0.57776415", "0.5767349", "0.57645994", "0.5742941", "0.5737655", "0.57161695", "0.57145613", "0.5667657", "0.5655679", "0.56465036", "0.5643793", "0.5592586", "0.55539525", "0.5548281", "0.55212736", "0.5521219", "0.55125207", "0.55117273", "0.55050296", "0.5504925", "0.54988086", "0.5492439", "0.5484441", "0.5470231", "0.545942", "0.5440553", "0.54295075", "0.54129857", "0.5393335", "0.5372472", "0.536234", "0.5361769", "0.53535444", "0.533912", "0.5331422", "0.5325898", "0.5303328", "0.53021926", "0.5293336", "0.5280909", "0.527987", "0.52784395", "0.5244915", "0.5241938", "0.5231007", "0.5230003", "0.5228234", "0.5220014", "0.5214156", "0.521324", "0.5208382", "0.5194836", "0.51824623", "0.51814836", "0.5178164", "0.51778793", "0.5175389", "0.51693183", "0.516911", "0.51525337", "0.5148981", "0.51418775", "0.5136699", "0.5132132", "0.5129656", "0.5129034", "0.51274747", "0.5126349", "0.51259154", "0.51131344", "0.5111657", "0.51095396", "0.51086336", "0.5107356", "0.5100201", "0.5093657", "0.50872535", "0.50790423", "0.5078988", "0.507272", "0.5070429", "0.50688463" ]
0.82558614
0
Number of processes common to comm_1 and comm_2 (None if either communicator is MPI.COMM_NULL)
def intersection_size(comm_1, comm_2):
    if comm_1 == MPI.COMM_NULL or comm_2 == MPI.COMM_NULL:
        return None
    group_1 = comm_1.Get_group()
    group_2 = comm_2.Get_group()
    inter_group = MPI.Group.Intersect(group_1, group_2)
    return inter_group.Get_size()
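A minimal usage sketch for this helper, assuming mpi4py, the intersection_size definition above, and a run with at least four ranks (e.g. mpiexec -n 4); the overlapping rank subsets below are chosen only for illustration:

from mpi4py import MPI

world = MPI.COMM_WORLD
world_group = world.Get_group()

# Two overlapping subsets of world ranks: {0, 1, 2} and {1, 2, 3}.
comm_a = world.Create(world_group.Incl([0, 1, 2]))
comm_b = world.Create(world_group.Incl([1, 2, 3]))

# Create() is collective over world and returns MPI.COMM_NULL on ranks
# outside the group, so intersection_size() returns None there and 2
# (ranks 1 and 2) on ranks that hold both communicators.
n_common = intersection_size(comm_a, comm_b)
print(world.Get_rank(), n_common)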
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def common_count(self, node_1, node_2):\n return int(len(set(nx.neighbors(self.graph, node_1)).intersection(set(nx.neighbors(self.graph, node_2)))))", "def communities_with_protesters(partition, active_nodes):\n return len(set([partition[node] for node in active_nodes]))", "def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)", "def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)", "def calc_process_cohesion(partitions, graph):\n ch = 0\n for part in partitions:\n crc = calc_community_relation_cohesion(part, graph)\n cic = calc_community_information_cohesion(part, graph)\n ch = ch + (crc * cic)\n ch = ch / len(partitions)\n return ch", "def protesting_communities(partition, active_nodes):\n communities = defaultdict(int)\n for node in active_nodes:\n communities[partition[node]] += 1\n return communities", "def count_common_connections(network, user_A, user_B):\n count = 0\n if user_A not in network or user_B not in network:\n return False\n for person in network[user_A][0]:\n if person in network[user_B][0]:\n count += 1\n return count", "def calc_community_information_cohesion(partition, graph):\n pre_suc = list()\n for vertex in partition:\n pre_suc.extend(get_unique_predecessors_successors(vertex, graph))\n pre_suc = get_duplicates(pre_suc)\n if len(pre_suc) == 0:\n cic = 0\n else:\n cic = len(pre_suc) / len(partition)\n return cic", "def compare_comm(comm_1, comm_2):\n assert comm_1 != MPI.COMM_NULL\n assert comm_2 != MPI.COMM_NULL\n result = MPI.Comm.Compare(comm_1, comm_2)\n res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]\n return result == res[0]", "def neigh_comm(n):\n\n nc = {bl[n]: 0.0}\n for idx in range(0, node_count):\n neigh = idx\n ncomm = bl[neigh]\n nei_weight = network[n][idx]\n if (neigh != n) & (nei_weight > 0.0):\n if ncomm in nc:\n nc[ncomm] += nei_weight\n else:\n nc[ncomm] = nei_weight\n return nc", "def count_common_subgraphs(graph1, graph2, n1, n2,\n node_attrib='label', edge_attrib='label'):\n for graph in (graph1, graph2):\n assert nx.is_directed_acyclic_graph(graph)\n \n if graph1.node[n1][node_attrib] != graph2.node[n2][node_attrib]:\n return 0\n\n n1_children = dependency_children(graph1, n1, edge_attrib=edge_attrib)\n n2_children = dependency_children(graph2, n2, edge_attrib=edge_attrib)\n\n if not n1_children or not n2_children:\n return 0\n else:\n result = 1 # neutral element of multiplication\n for n1_target, n2_target in common_dependency_targets(graph1, graph2, n1, n2,\n node_attrib=node_attrib):\n result *= (count_common_subgraphs(graph1, graph2,\n n1_target, n2_target,\n node_attrib='label',\n edge_attrib='label') + 2)\n return result - 1", "def part1(programs):\n count = 0\n for program in programs:\n if program.connected(0)[0]:\n count += 1\n\n return count", "def commonCharacterCount(s1, s2):\n return sum(min(s1.count(x),s2.count(x)) for x in set(s1))", "def _num_conn_comp(graph):\n\n return nx.number_connected_components(graph)", "def count_common_connections(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return False\n common_connections = 0\n for conn in network[user_A]['connections']:\n if conn in network[user_B]['connections']:\n common_connections += 1\n return common_connections", "def countMatches(g1, g2):\n if g1 is None or g2 is None or len(g1) == 0 or len(g1[0]) == 0: # sanity check\n 
return 0\n count = 0\n for i in range(len(g1)):\n for j in range(len(g1[0])):\n if g1[i][j] == g2[i][j] == 1 and search_grid(g1, g2, i, j):\n count = count + 1\n return count", "def commonality(left_struc, right_struc):\n assert type(left_struc) is type(right_struc), (left_struc, right_struc)\n assert left_struc and right_struc, (left_struc, right_struc)\n if type(left_struc) is dict:\n (overlap, left, right) = compute_keysets(left_struc, right_struc)\n com = float(len(overlap))\n tot = len(overlap.union(left, right))\n else:\n assert type(left_struc) in (list, tuple), left_struc\n com = 0.0\n for elem in left_struc:\n if elem in right_struc:\n com += 1\n tot = max(len(left_struc), len(right_struc))\n\n return com / tot", "def _common_prefix(sequence1, sequence2):\n i = 0\n for elem1, elem2 in zip(sequence1, sequence2):\n if elem1 != elem2:\n return i\n i += 1\n\n # Return length of sequence if sequences are identical\n return min(len(sequence1), len(sequence2))", "def common_token_len(self, w1, d1, w2, d2):\n w1_tk = set(self.__stem_Tokens(w1))\n w2_tk = set(self.__stem_Tokens(w2))\n common_len = len(w1_tk.intersection(w2_tk))\n return common_len", "def compare_groups(comm_1, comm_2):\n assert comm_1 != MPI.COMM_NULL\n assert comm_2 != MPI.COMM_NULL\n result = MPI.Comm.Compare(comm_1, comm_2)\n res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]\n return result in res[:-1]", "def get_common_words_count(arr1, arr2):\n return len(list(set(arr1).intersection(arr2)))", "def n_components(self):\n return 1", "def shared_nb(self):\n return self.bbsitting_set.count() + self.booked.count()", "def get_relatedness(theme1,theme2):\r\n nAB=0\r\n nAB_plus=0\r\n for sentence_1 in theme1.sentences:\r\n for sentence_2 in theme2.sentences:\r\n if cmp(sentence_1[2],sentence_2[2])==0:\r\n nAB=nAB+1\r\n if sentence_1[3]==sentence_2[3]:\r\n nAB_plus=nAB_plus+1\r\n if nAB==0:\r\n return 0\r\n else:\r\n return float(nAB_plus)/float(nAB)", "def position_counter(strains):\n with database.make_connection() as connection:\n pos = []\n for strain in strains:\n # Get every variant position\n cursor = r.table(TABLE).filter({'StrainID': strain}).pluck(\n 'Position').run(connection)\n cur = [strain['Position'] for strain in cursor]\n pos = pos+cur\n common = filter_counts(pos, len(strains))\n return common", "def calc_community_relation_cohesion(partition, graph):\n n_overlaps = get_number_of_overlaps(partition, graph)\n if n_overlaps > 0 and len(partition) > 1:\n avg_ov = ((n_overlaps * 2) / len(partition))\n crc = avg_ov / (len(partition) * len(partition) - 1)\n elif n_overlaps == 0:\n crc = 0\n elif len(partition) == 1:\n crc = n_overlaps\n else:\n crc = 0\n return crc", "def getDimensions(self, spSys1, spSys2):\n\n proton1, proton2 = spSys1.getAtoms()[0], spSys2.getAtoms()[0]\n\n assignments1 = [atom for assi in self.getProton1Assignments()\n for atom in assi.getAtoms()]\n\n assignments2 = [atom for assi in self.getProton2Assignments()\n for atom in assi.getAtoms()]\n\n dimensions1 = [proton1 in assignments1, proton1 in assignments2]\n dimensions2 = [proton2 in assignments1, proton2 in assignments2] \n\n dims = [None, None]\n\n if dimensions1.count(1) == 1 and dimensions2.count(1) == 1:\n dims[0] = dimensions1.index(1) + 1\n dims[1] = dimensions2.index(1) + 1\n \n\n elif dimensions1.count(1) == 1 and dimensions2.count(1) == 2:\n\n dims[0] = dimensions1.index(1) + 1\n dims[1] = 1\n if dims[0] == 1:\n dims[1] = 2\n\n elif dimensions1.count(1) == 2 and dimensions2.count(1) == 1:\n dims[1] = dimensions2.index(1) + 
1\n dims[0] = 1\n if dims[1] == 1:\n dims[0] = 2\n\n elif dimensions1.count(1) == 2 and dimensions2.count(1) == 2:\n dims = [1,2]\n h1 = proton1.getHeteroAtom()\n h2 = proton2.getHeteroAtom()\n \n h1assi = [atom for assi in self.getHetero1Assignments()\n for atom in assi.getAtoms()]\n \n h2assi = [atom for assi in self.getHetero2Assignments()\n for atom in assi.getAtoms()]\n\n if h1assi:\n if h2 in h1assi:\n dims = [2, 1]\n \n if h2assi:\n if h1 in h2assi:\n dims = [2, 1]\n \n if None in dims:\n raise ValueError, 'spin pair could not be assigned to dimensions'\n\n else:\n return tuple(dims)", "def getNumPoints(self, l1, l2):\n n1 = self.pointcounts[l1]\n n2 = self.pointcounts[l2]\n self.pointcounts[('Cl_%d' % self.labelcount, l1, l2)] = n1 + n2\n return (n1, n2)", "def get_co_occurrences(self, word1, word2):\n raise NotImplementedError(\"Word2Vec model does not support co-occurrence counting\")", "def numcheck(list1, list2):\r\n set1 = set(list1)\r\n set2 = set(list2)\r\n #set3 contains all items common to set1 and set2\r\n set3 = set1.intersection(set2)\r\n # return number of matching items\r\n return len(set3)", "def matching_score(self,set1, set2):\n set_set1=set(set1)\n set_set2=set(set2)\n '''print(\" set_set12\")\n print(set_set1)\n print(set_set2)'''\n return len(set_set1.intersection(set_set2)) ** 2 / (float(len(set1)) * len(set2))\n #return len(set_set1.intersection(set_set2)) / len(set_set1.union(set_set2))", "def calc_cohesion( g, sg0, sg1, max_csize ) :\n score = 0.0\n n0 = len( sg0 )\n n1 = len( sg1 )\n if (n0 + n1 <= max_csize) :\n boundary_edges = networkx.edge_boundary( g, sg0, sg1 )\n for e in boundary_edges :\n score += g[e[0]][e[1]][\"similarity\"]\n return score / max( n0, n1 )", "def common_segments(p1, p2, common_func):\n common = common_func(split(p1), split(p2))\n lgth = len(common) if common else 0\n common = '/'.join(common) if common else None\n return common, lgth", "def calc_process_coupling_cohesion_ratio(partitions, graph):\n cp = calc_process_coupling(partitions, graph)\n ch = calc_process_cohesion(partitions, graph)\n if cp == 0 or ch == 0:\n pccr = 0\n else:\n pccr = cp / ch\n return float(pccr)", "def compare(strokes1, strokes2):\n\n score = 0\n for stroke_i in strokes1:\n match = identify(strokes2, stroke_i)\n score += match\n\n # draw1 = concat(strokes1)\n # draw2 = concat(strokes2)\n # draw1_length,_ = draw1.euclidian_length()\n # draw2_length,_ = draw2.euclidian_length()\n\n # tot_length = draw1_length# + draw2_length\n\n return score", "def entity_relatedness(self, a, b):\n occ_a = self.occurrences(a)\n occ_b = self.occurrences(b)\n occ_common = occ_a.intersection(occ_b)\n\n try:\n logmax = max(len(occ_a), len(occ_b))\n logmin = min(len(occ_a), len(occ_b))\n logint = len(occ_common)\n return (logmax - logint) / (self.LOGW - logmin)\n except ValueError:\n return 0.0", "def jointly_equally_distributed(\n class1: Av, class2: Av, n: int = 6, dim: int = 2\n ) -> Iterator[Tuple[str, ...]]:\n return (\n tuple(stat[0] for stat in stats)\n for stats in combinations(PermutationStatistic._STATISTICS, dim)\n if all(\n Counter(\n tuple(stat[1](p) for stat in stats) for p in class1.of_length(i)\n )\n == Counter(\n tuple(stat[1](p) for stat in stats) for p in class2.of_length(i)\n )\n for i in range(n + 1)\n )\n )", "def get_num_sync_workers(self, comm):\n if self.synchronous:\n return int( math.ceil( 0.95 * (comm.Get_size() - 1) ) )\n return 1", "def getCommon(self, st1, st2):\n fl = len(st1)\n shorter = len(st2)\n if fl < shorter:\n shorter = fl\n \n i = 0\n 
while i < shorter:\n if st1[i] != st2[i]:\n break\n i += 1\n return i", "def get_n_chains(self): \n res_id_cnt = 0\n tot_n_res = len(self.res_ids)\n n_chns = 0\n for res_id in self.res_ids:\n res_chn_i = res_id[2]\n if res_id_cnt > 1:\n if res_chn_i == self.res_ids[res_id_cnt-1][2]:\n pass\n else:\n n_chns+=1\n res_id_cnt+=1\n return n_chns", "def count_common(self, other, downsample=False):\n if not isinstance(other, MinHash):\n raise TypeError(\"Must be a MinHash!\")\n return self._methodcall(lib.kmerminhash_count_common, other._get_objptr(), downsample)", "def __len__(self):\n return len(self.qc_mol.atoms) + len(self.br_mol.atoms) + len(self.pc_mol.atoms)", "def common():\n snmp.common()\n return 0", "def match_occurences(chronicle1,chronicle2,paire,count) :\n listOccurencesMatch=[]\n for element in count:\n #chronicle[0] : events in the chronicle\n if element in chronicle1[0] :\n occElementChronicle1=get_occurence_chronicle(chronicle1,element)\n numChronicle=1\n if element in chronicle2[0] :\n occElementChronicle2=get_occurence_chronicle(chronicle2,element)\n numChronicle=2\n #if element is only in one of the two chronicles\n if count[element]<2 : \n occElementPaire=get_occurence_paire(paire,element)\n if(numChronicle==1) :\n nbMatch=count_similar_occurence(occElementChronicle1,occElementPaire)\n if(numChronicle==2) :\n nbMatch=count_similar_occurence(occElementChronicle2,occElementPaire)\n #if element is in the two chronicles\n if count[element]==2 : \n nbMatch=count_similar_occurence(occElementChronicle1,occElementChronicle2)\n listOccurencesMatch.append([element,nbMatch])\n return listOccurencesMatch", "def correspondences(labels1,labels2,return_counts=True):\n q = 100000\n assert amin(labels1)>=0 and amin(labels2)>=0\n assert amax(labels2)<q\n combo = labels1*q+labels2\n result = unique(combo, return_counts=return_counts)\n if return_counts:\n result, counts = result\n result = array([result//q,result%q,counts])\n else:\n result = array([result//q,result%q])\n return result", "def num_producers(self):\n producers = self.info_wells.groupby('well_type').get_group('prod')\n return producers['well'].nunique()", "def calc_process_coupling(partitions, graph):\n avg_pc = calc_avg_process_coupling(partitions, graph)\n if not (len(partitions) or avg_pc) == 0:\n pc = avg_pc / len(partitions)\n else:\n pc = 0\n return pc", "def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))", "def number_of_connections(self, asn):\n customer_count = 0\n provider_count = 0\n peer_count = 0\n\n for neighbor in nx.all_neighbors(self, asn):\n edge_data = self.get_edge_data(asn, neighbor)\n if edge_data[\"relationship\"] == -1 and edge_data[\"as1\"] == asn:\n customer_count += 1\n elif edge_data[\"relationship\"] == -1 and edge_data[\"as2\"] == asn:\n provider_count += 1\n elif edge_data[\"relationship\"] == 0:\n peer_count += 1\n return customer_count, provider_count, peer_count", "def overlap(g, node_1, node_2):\n inter = len(set(nx.neighbors(g, node_1)).intersection(set(nx.neighbors(g, node_2))))\n return float(inter)", "def _count_concordant_pairs(preds: Tensor, target: Tensor) ->Tensor:\n return torch.cat([_concordant_element_sum(preds, target, i) for i in range(preds.shape[0])]).sum(0)", "def compare_sentences(first, second):\n if not len(first) or not len(second):\n return 0\n return len(set(only_important(first)) & set(only_important(second))) / ((len(first) + len(second)) / 2.0)", "def _common_length_of(l1, l2=None, l3=None):\n 
args = [];\n if l1 != None: args.append(l1)\n if l2 != None: args.append(l2)\n if l3 != None: args.append(l3)\n\n length = None\n num = 0\n for l in args:\n for i in l:\n num += 1\n length_i = len(i)\n if length!=None and length_i!=length:\n raise ValueError, \"Argument lengths differ!\"\n length = length_i\n\n return num, length", "def count_pairs(assignments, v1, v2, M):\n assert v1 != v2\n pairs = assignments[:, v1].astype(np.int32) * M + assignments[:, v2]\n return np.bincount(pairs, minlength=M * M).reshape((M, M))", "def __between_cluster_distance(self,cluster_1,cluster_2,condensed_distance_matrix):\n mixed_cohesion = 0\n for c_i in cluster_1.all_elements:\n for c_j in cluster_2.all_elements:\n mixed_cohesion = mixed_cohesion + condensed_distance_matrix[c_i,c_j]\n return mixed_cohesion", "def get_co_occurrences(self, word1, word2):\n word_id1 = self._word2_contiguous_id(word1)\n word_id2 = self._word2_contiguous_id(word2)\n return self._get_co_occurrences(word_id1, word_id2)", "def __call__(self, f1, f2):\n # return len(f1.set & f2.set)\n return len(set(f1.features) & set(f2.features))", "def num_conll(self):\n pass", "def percent_identity(align_1, align_2):\n matches = 0\n for i in range(len(align_1)):\n if align_1[i] == align_2[i]:\n matches+= 1\n percent_identity = matches / len(align_1)\n return percent_identity", "def set_consist(ss, ia, ib, input1, input2):\n comf3 = open(input2).readlines()\n comf1 = open(input1).readlines()\n\n \"\"\"\n get module identifiers\n \"\"\"\n comm1_array = []\n comm3_array = []\n\n for line in comf1:\n a, b = map(int, line.split())\n comm1_array.append(b)\n\n comm1_array.append(comm1_array[len(comm1_array)-1])\n\n for line in comf3:\n a, b = map(int, line.split())\n comm3_array.append(b)\n\n\n \"\"\"\n Make dictionaries. 
module numbers are keys and voxels in modules are values\n \"\"\" \n mod3_dict = {}\n mod1_dict = {}\n for i in set(comm3_array):\n mod3_dict[i] = [v for v, c in enumerate(comm3_array) if c == i]\n for i in set(comm1_array):\n mod1_dict[i] = [v for v, c in enumerate(comm1_array) if c == i]\n\n\n \"\"\"\n For each voxel, find its module in condition3, then in condition1, and interset voxels in its module in condition3 with condition1\n \"\"\"\n preservation = []\n for i in xrange(len(comm3_array)):\n if len(mod3_dict[comm3_array[i]]) < 20 or len(mod1_dict[comm1_array[i]]) < 20:\n preservation.append(777)\n else:\n inter = len(set(mod3_dict[comm3_array[i]]).intersection(set(mod1_dict[comm1_array[i]])))\n preservation.append(round(inter / float(len(mod3_dict[comm3_array[i]])), 4))\n\n pres_out = \"\"\n for line in preservation:\n pres_out += str(round(line,4))+\"\\n\"\n\n #outname = os.environ[\"state\"]+\"/\"+ss+\"/modularity5p/set_consistency/preserved_iters_\"+ia+\"_\"+ib+\"_\"+ss+\".txt\"\n #outname = os.environ[\"state\"]+\"/\"+ss+\"/modularity5p/set_consistency/iter\"+ia+\"_\"+ss+\"_preserved.txt\"\n outname = os.environ[\"state\"]+\"/\"+ss+\"/modularity5p/set_consistency2/iter\"+ia+\"_\"+ss+\"_preserved.txt\"\n outf = open(outname, \"w\")\n outf.write(pres_out)\n outf.close()", "def computeCriteria(seg1,seg2,mergedSegments,weights):\n criteronScores = [\n profileSim(seg1,[seg2],updatedSpeed),\n directtion(seg1,[seg2],mergedSegments),\n shareNoEdges(seg1,[seg2],mergedSegments)\n ]\n return sum(criteronScores*weights)", "def common(self):", "def get_common_snps(self):\n\n for chromosome in self.snpsites.keys():\n for position in self.snpsites[chromosome].keys():\n if all(self.snpsites[chromosome][position]) == True:\n # lets check if all alt bases are same\n alt_snps = []\n for index in range(len(self.snpsites[chromosome][position])):\n alt_snps.append(\n self.snp_positions[self.vcffilenames[index]][chromosome][\n position\n ][\"alt\"]\n )\n\n counts = self.count_list_elements_occurrences(alt_snps)\n\n for countindex in range(len(counts)):\n if counts[countindex] == len(self.vcffilenames):\n self.snp_positions[self.vcffilenames[countindex]][\n chromosome\n ][position].update({\"common\": True})", "def count_all(a, b):\n return len([1 for w in b if w == a])", "def _n_workers(self, processes: int = 2) -> int:\n if 2 <= processes <= cpu_count():\n n_workers = processes\n else:\n n_workers = cpu_count()\n return n_workers", "def _get_number_of_gpu_devices_connected(self):\n gpu_devices = self._get_gpu_pci_devices()\n gpu_devices_count = len(gpu_devices)\n return {'pci_gpu_devices': gpu_devices_count}", "def count_task2_group(answers):\n return len(set.intersection(*answers))", "def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)", "def _make_comms(n_spokes, fullcomm=None):\n if not haveMPI:\n raise RuntimeError(\"make_comms called, but cannot import mpi4py\")\n # Ensure that the proper number of processes have been invoked\n nsp1 = n_spokes + 1 # Add 1 for the hub\n if fullcomm is None:\n fullcomm = MPI.COMM_WORLD\n n_proc = fullcomm.Get_size() \n if n_proc % nsp1 != 0:\n raise RuntimeError(f\"Need a multiple of {nsp1} processes (got {n_proc})\")\n\n # Create the strata_comm and cylinder_comm\n # Cryptic comment: intra is vertical, inter is around the hub\n global_rank = fullcomm.Get_rank()\n 
strata_comm = fullcomm.Split(key=global_rank, color=global_rank // nsp1)\n cylinder_comm = fullcomm.Split(key=global_rank, color=global_rank % nsp1)\n return strata_comm, cylinder_comm", "def num_cores(self):\n return self.mpi_procs * self.omp_threads", "def Number_elements(file1,file2):\n start = time.time()\n\n verified_element = np.intersect1d(np.array(file1), np.array(file2)) \n\n print(len(verified_element))\n print(f'Duration: {time.time() - start} seconds')", "def elementCom(Paire1,Paire2) :\n elem_com=\" \"\n elementPaire1=\" \"\n elementPaire2=\" \"\n p1 = Paire1[1]\n p2 = Paire2[1]\n if p1 != p2 :\n for i in range (2):\n for j in range (2):\n if p1[i] == p2[j]:\n elem_com = p1[i] \n elementPaire1 = p1[1-i] \n elementPaire2 = p2[1-j] \n return elem_com, elementPaire1, elementPaire2", "def distance(mass_1: ObjectMass, mass_2: ObjectMass) -> int:\n\n # collect orbit hops\n orbits_1 = mass_1.get_orbit_hops()\n\n orbits_2 = mass_2.get_orbit_hops()\n\n # find common orbit hop with least amount of hops\n common_hops: set = orbits_1.keys() & orbits_2.keys()\n\n hop = common_hops.pop()\n smallest_total_hops = orbits_1[hop] + orbits_2[hop]\n for hop in common_hops:\n total_hops = orbits_1[hop] + orbits_2[hop]\n\n if total_hops < smallest_total_hops:\n smallest_total_hops = total_hops\n\n return smallest_total_hops", "def common_token_len_with_slash(self, w1, d1, w2, d2):\n w1_tk = set(re.split(\"[\\s-]\", w1))\n w2_tk = set(re.split(\"[\\s-]\", w2))\n common_len = len(w1_tk.intersection(w2_tk))\n return common_len", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def pair_idx(rows, comm=None):\n raise Exception(\"Not implemented\")\n \n if comm == None:\n comm = MPI.COMM_WORLD\n \n total = comb(rows,2,exact=True)\n size = comm.Get_size()\n \n size = 1000\n \n print(total / size)\n \n target = total / size\n \n current_row = 0\n calc_list = []\n row_list = [[] for x in range(size)]\n for rank in range(size):\n row_list[rank].append(current_row)\n \n current_calcs = 0\n \n for value in range(current_row, rows):\n current_calcs += value\n if current_calcs > target:\n if rank == size-1:\n pass\n else:\n break\n \n calc_list.append(current_calcs)\n row_list[rank].append(value)\n current_row = value\n \n return row_list,calc_list", "def sim_distance(p1, p2):\n # Get the list of shared_items\n #print '-- sim_distance', p1, p2\n si = [item for item in p1 if item in p2]\n\n if len(si) != 0:\n squares = [pow(p1[item] - p2[item], 2) for item in si]\n # Add up the squares of all the differences\n sum_of_squares = sum(squares)\n return 1 / (1 + np.sqrt(sum_of_squares))\n return 0", "def countingPointMutations(seq1, seq2):\n seqLength = len(list(seq1))\n \n hammingDistance=0;\n for i in range(0,seqLength):\n if list(seq1)[i]!=list(seq2)[i]:\n hammingDistance = hammingDistance+1;\n return hammingDistance", "def common_hypernyms(self, other):\n return set(self.all_hypernyms()).intersection(set(other.all_hypernyms()))", "def combined_step_count(intersection_coords, wire_one_map, wire_two_map):\n return wire_one_map[intersection_coords] + wire_two_map[intersection_coords]", "def solve_part_two(wire_one_map, wire_two_map):\n return min([combined_step_count(intersection_coords, wire_one_map, wire_two_map) for intersection_coords in find_intersection(wire_one_map, wire_two_map)])", "def 
number_strongly_connected_components(G):\n return len(strongly_connected_components(G))", "def num_processes(self, new_value):", "def compare_two_grids( treecode1, treecode2 ):\n import numpy as np\n\n common_blocks = 0\n\n for i in range(treecode1.shape[0]):\n # we look for this tree code in the second array\n code1 = treecode1[i,:]\n\n for j in range(treecode2.shape[0]):\n code2 = treecode2[j,:]\n if np.linalg.norm( code2-code1 ) < 1.0e-13:\n # found code1 in the second array\n common_blocks += 1\n break\n\n print( \"Nblocks1=%i NBlocks2=%i common blocks=%i\" % (treecode1.shape[0], treecode2.shape[0], common_blocks) )\n\n return common_blocks / treecode1.shape[0]", "def subcontract(a, b):\n for x in a:\n for y in b:\n if common_side(x, y):\n return 1\n return 0", "def marked_pair_counts(sample1, sample2, rbins, period, num_threads,\\\n do_auto, do_cross, marks1, marks2, wfunc, _sample1_is_sample2):\n \n #add ones to weights, so returned value is return 1.0*1.0\n marks1 = np.vstack((marks1,np.ones(len(marks1)))).T\n marks2 = np.vstack((marks2,np.ones(len(marks2)))).T\n \n if do_auto==True:\n D1D1 = marked_npairs(sample1, sample1, rbins,\\\n weights1=marks1, weights2=marks1,\\\n wfunc = wfunc,\\\n period=period, num_threads=num_threads)\n D1D1 = np.diff(D1D1)\n else:\n D1D1=None\n D2D2=None\n \n if _sample1_is_sample2:\n D1D2 = D1D1\n D2D2 = D1D1\n else:\n if do_cross==True:\n D1D2 = marked_npairs(sample1, sample2, rbins,\\\n weights1=marks1, weights2=marks2,\\\n wfunc = wfunc,\\\n period=period, num_threads=num_threads)\n D1D2 = np.diff(D1D2)\n else: D1D2=None\n if do_auto==True:\n D2D2 = marked_npairs(sample2, sample2, rbins,\\\n weights1=marks2, weights2=marks2,\\\n wfunc = wfunc,\\\n period=period, num_threads=num_threads)\n D2D2 = np.diff(D2D2)\n else: D2D2=None\n \n return D1D1, D1D2, D2D2", "def common_prefix_len(self, other: \"ProofPath\") -> int:\n if self.start() == other.start():\n return self.match_len(other, self.start())\n\n return 0", "def p_obj1_given_no_obj2(self, obj1, obj2):\n if obj2 in self.prior[obj1]:\n obj1_and_obj2_count = self.prior[obj1][obj2]\n else:\n obj1_and_obj2_count = 0\n\n p = (self.sums[obj1] - obj1_and_obj2_count) / float(self.total_objects - obj1_and_obj2_count)\n assert 0 <= p and p <= 1, (p, obj1, obj2)\n return p", "def report(self):\n print('total 1', len(self.videoids1))\n print('total 2', len(self.videoids2))\n print('total of repeats in_1', len(self.videoids_dict_repeats1))\n print('total of repeats in_2', len(self.videoids_dict_repeats2))\n print('total in_1_missing_in_2', len(self.in_1_missing_in_2))\n print('total in_2_missing_in_1', len(self.in_2_missing_in_1))", "def common_nbrs(self, u, v):\n u_adj = self.nx_graph.neighbors(u)\n v_adj = self.nx_graph.neighbors(v)\n nbrs = []\n for u in u_adj:\n if u in v_adj:\n nbrs.append(u)\n\n return nbrs, u_adj, v_adj", "def components(self):\n comps = 0\n unexplored = self.nodes()\n while unexplored:\n comps += 1\n queue = {unexplored.pop()}\n while queue:\n new = queue.pop()\n unexplored.remove(new)\n for adjacent in new.parents() | new.children():\n if adjacent in unexplored:\n queue.add(adjacent)\n return comps", "def percent_overlap(items1, items2, k = None):\n if k is None:\n k = max([len(items1), len(items2)])\n assert k > 0 and k <= max([len(items1), len(items2)]), 'k is out of bounds!'\n items1_set, items2_set = set(items1[:k]), set(items2[:k])\n return len(items1_set & items2_set) / len(items1_set | items2_set)", "def count_difference(patch1, patch2):\n\n\treturn np.sum(np.square(patch1 - 
patch2))", "def count_same(pairs):\n same_count = 0\n for x, y in pairs:\n if x == y:\n same_count = same_count + 1\n return same_count", "def number_weakly_connected_components(G):\n return sum(1 for wcc in weakly_connected_components(G))", "def count_matching_genes(genome1, genome2):\n count = 0\n\n inno1 = max(genome1.nodes.keys())\n inno2 = max(genome2.nodes.keys())\n\n for i in range(max(inno1, inno2) + 1):\n n1 = genome1.nodes.get(i, None)\n n2 = genome2.nodes.get(i, None)\n if not (n1 is None or n2 is None):\n count += 1\n\n inno1 = max(genome1.connections.keys())\n inno2 = max(genome2.connections.keys())\n\n for i in range(max(inno1, inno2) + 1):\n c1 = genome1.connections.get(i, None)\n c2 = genome2.connections.get(i, None)\n if not (c1 is None or c2 is None):\n count += 1\n\n return count", "def num_native (conformation, protein_name = 'mer15'):\n from src.energy import getCoord as getCoord\n \n x,y,z = getCoord(conformation) \n native_list = get_native_list (protein_name)\n \n count = 0 \n for e in native_list:\n i = e[0]\n j = e[1]\n count += isContact(i,j, conformation)\n return count", "def count():", "def SecondPart():\n return countAllBagsIn(targetBag, organizedBags)", "def calculate_components(self, parts):\n target = {}\n for part in parts:\n rank = part[0]\n\n try:\n face = part[1]\n except IndexError:\n face = '*'\n\n try:\n target[rank][face] += 1\n except KeyError:\n if rank not in target:\n target[rank] = {}\n target[rank][face] = 1\n\n return target" ]
[ "0.71106726", "0.64436406", "0.6228776", "0.6228776", "0.62096745", "0.6076676", "0.60679585", "0.59844345", "0.59690386", "0.5953923", "0.5925899", "0.5922579", "0.59189546", "0.5907016", "0.58420223", "0.5753761", "0.57528996", "0.5745631", "0.574225", "0.5684263", "0.56555855", "0.56460875", "0.56446713", "0.5644042", "0.5632925", "0.562713", "0.5591475", "0.55852157", "0.55772763", "0.5530146", "0.55288094", "0.5491559", "0.5484253", "0.5475631", "0.5469677", "0.54583365", "0.5456889", "0.54535264", "0.5448934", "0.5429339", "0.5423434", "0.54223484", "0.54191977", "0.54184264", "0.5415111", "0.5396498", "0.53939176", "0.53867793", "0.5370391", "0.53552556", "0.534171", "0.53374934", "0.53235704", "0.53097594", "0.5308478", "0.53069824", "0.53055", "0.5299103", "0.5297021", "0.5294641", "0.52893746", "0.5282309", "0.52817255", "0.52810705", "0.5275639", "0.52755356", "0.52642256", "0.52624434", "0.5260421", "0.52559704", "0.52495587", "0.5241365", "0.5236542", "0.52306616", "0.52255285", "0.5206452", "0.5203818", "0.52024716", "0.51912034", "0.51891273", "0.51855284", "0.5184094", "0.5179271", "0.5178624", "0.51780283", "0.51683694", "0.5163708", "0.51623845", "0.51586914", "0.515837", "0.5154683", "0.51483387", "0.51380146", "0.51240903", "0.5118379", "0.51132566", "0.51039743", "0.5102031", "0.5094595", "0.5094355" ]
0.71126
0
Compare two MPI communicators. Returns True only if the two communicators are handles for the same group of processes and share the same communication context (MPI.IDENT).
def compare_comm(comm_1, comm_2):
    assert comm_1 != MPI.COMM_NULL
    assert comm_2 != MPI.COMM_NULL
    result = MPI.Comm.Compare(comm_1, comm_2)
    res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]
    return result == res[0]
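A short sketch of what this predicate distinguishes, assuming mpi4py; Dup() yields a congruent communicator (same process group, new communication context), which is exactly the case compare_comm rejects:

from mpi4py import MPI

world = MPI.COMM_WORLD
dup = world.Dup()  # same group of processes, but a fresh context

assert compare_comm(world, world)    # MPI.IDENT: same handle and context
assert not compare_comm(world, dup)  # MPI.CONGRUENT only, so False

dup.Free()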
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare_groups(comm_1, comm_2):\n assert comm_1 != MPI.COMM_NULL\n assert comm_2 != MPI.COMM_NULL\n result = MPI.Comm.Compare(comm_1, comm_2)\n res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]\n return result in res[:-1]", "def has_mpi_peer_processes():\n return mpi4py_available and MPI.COMM_WORLD.Get_size() > 1", "def is_parent(child, parent):\n # Get the list of processes\n assert child is not None\n assert parent is not None\n #child_ranks = [i for i in xrange(child.Get_size())]\n child_group = child.Get_group()\n parent_group = parent.Get_group()\n inter_group = MPI.Group.Intersect(child_group, parent_group)\n return child_group.Get_size() == inter_group.Get_size()", "def can_communicate_with(self, target):\n if self == target:\n return True\n msg = 'You try to connect topologies belonging to'\n msg += ' two different mpi tasks. Set taskids properly or use'\n msg += ' InterBridge.'\n assert self.task_id() == target.task_id(), msg\n\n # Parent communicator\n # Todo : define some proper conditions for compatibility\n # between topo_from, topo_to and parent:\n # - same size\n # - same domain\n # - common processus ...\n # At the time we check that both topo have\n # the same comm_origin.\n return self.is_consistent_with(target)", "def __eq__(self, other):\n return self.conn == other.conn and self.p1 == other.p1 and self.p2 == other.p2", "def hybrid_mpi_omp(self):\n return self.has_omp and self.has_mpi", "def _compare(smi1, smi2):\n return _canonicalize(smi1) == _canonicalize(smi2)", "def Mirrorprocs(p1, p2):\n return False", "def _fake_message_compare(m1, m2):\r\n m1 = m1.serialize()\r\n m2 = m2.serialize()\r\n diff = False\r\n for i in range(len(m1)):\r\n if m1[i] is None:\r\n continue\r\n if m1[i] != m2[i]:\r\n diff = True\r\n break\r\n return not diff", "def isHandle(self):\n return self.type in mpi_handle_types", "def mutexPropositions(prop1, prop2, mutexActions):\n for a1 in prop1.getProducers():\n for a2 in prop2.getProducers():\n if Pair(a1, a2) not in mutexActions:\n return False\n return True", "def _on_same_device(self, other: \"PArray\") -> bool:\n this_device = self._current_device_index\n return this_device in other._array", "def object_communicator():\n comm = MPI.COMM_WORLD", "def basic_compare(self, other: \"Molecule\") -> bool:\n return self.inchi_key[:14] == other.inchi_key[:14]", "def is_identical(self, other):\n return (self.compounddatatype == other.compounddatatype and\n self.min_row == other.min_row and\n self.max_row == other.max_row)", "def __eq__(self, other):\n for ls, lo in zip(self.leaderboard_names, other.leaderboard_names):\n if ls != lo:\n return False\n for ls, lo in zip(self.leaderboard_groups, other.leaderboard_groups):\n if ls != lo:\n return False\n if self.top_left != other.top_left:\n return False\n if self.bottom_right != other.bottom_right:\n return False\n return True", "def __eq__(self, other):\n if not isinstance(other, MessageGroup):\n return False\n\n return self.__dict__ == other.__dict__", "def e_paralelo(self, other):\n if (self == other) or (self.normaliza() == other.normaliza()):\n return True\n else:\n return False", "def pure_mpi(self):\n return self.has_mpi and not self.has_omp", "def compare(self, other_group):\n x_bounds = self.bounding_box_x_len == other_group.bounding_box_x_len\n y_bounds = self.bounding_box_y_len == other_group.bounding_box_y_len\n same_num_cells = self.num_colored_cells == other_group.num_colored_cells\n if not (x_bounds and y_bounds and same_num_cells):\n return False\n for row_ind in 
range(len(other_group.cells)):\n for col_ind in range(len(other_group.cells[0])):\n if other_group.cells[row_ind][col_ind] != self.cells[row_ind][col_ind]:\n return False\n return True", "def is_mpi_env():\n try:\n import mpi4py\n except ImportError:\n return False\n\n try:\n import mpi4py.MPI\n except ImportError:\n return False\n\n if mpi4py.MPI.COMM_WORLD.size == 1 and mpi4py.MPI.COMM_WORLD.rank == 0:\n return False\n return True", "def __eq__(self, other):\n if not isinstance(other, WrappedChannel):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return isinstance(other, type(self)) and set(self.channels) == set(other.channels)", "def __eq__(self, other):\n return isinstance(other, Procedure) and self.__uuid == other.uuid", "def are_equal(self, sp1, sp2):\n return True", "def are_equal(self, sp1, sp2):\n return sp1 == sp2", "def are_connected(self, name1, name2):", "def intersection_size(comm_1, comm_2):\n if comm_1 == MPI.COMM_NULL or comm_2 == MPI.COMM_NULL:\n return None\n group_1 = comm_1.Get_group()\n group_2 = comm_2.Get_group()\n inter_group = MPI.Group.Intersect(group_1, group_2)\n return inter_group.Get_size()", "def __eq__(self, other):\r\n return (type(self) == type(other) and\r\n other.broadcastable == self.broadcastable)", "def check_comm(instance):\n\n comm = instance.__dict__.get(\"commodity\")\n\n accounted_comm = set()\n\n for c in comm:\n\n for r in instance.reactions:\n r_dict = r.__dict__\n\n for label, species in r_dict.items():\n\n if instance.__dict__.get(\"recombination\") == Recomb_1:\n product = r_dict.get(\"left2\")\n\n else:\n product = r_dict.get(\"right2\")\n\n if product == c:\n accounted_comm.add(c)\n\n if set(comm) == accounted_comm:\n return True\n else:\n print(\"Commodity:\", set(comm))\n print(\"Commodity products made:\", accounted_comm)\n return False", "def _cmp_(self, other):\n return cmp(self.matrix(), other.matrix())", "def __eq__(self, other):\n parent_same = self.parent1.rid == other.parent1.rid \\\n and self.parent2.rid == other.parent2.rid\n\n parents_opposite = self.parent2.rid == other.parent1.rid \\\n and self.parent1.rid == other.parent2.rid\n\n return parent_same or parents_opposite", "def are_equal(self, sp1, sp2):\n return", "def __eq__(self, other):\n return np.all(self.grid == other.grid) and np.all(self.pos == other.pos)", "def comaIsSymmetric(self):\n\t\tfor i in range(2*self.totalBins):\n\t\t\tfor j in range(2*self.totalBins):\n\t\t\t\tif not self.coma[i,j] == self.coma[j,i]:\n\t\t\t\t\tprint i,j,self.coma[i,j],self.coma[j,i]\n\t\t\t\t\treturn False\n\t\treturn True", "def check_if_equal(self, other):\n if self.get_time_left() == other.get_time_left():\n if len(self.balls) == len(other.balls) and len(self.hexagons) == len(other.hexagons):\n for player in self.players:\n does_have_equal_player = False\n for other_player in other.players:\n if player == other_player:\n does_have_equal_player = True\n break\n if not does_have_equal_player:\n return False\n for bubble in self.balls:\n does_have_equal_bubble = False\n for other_bubble in other.balls:\n if bubble == other_bubble:\n does_have_equal_bubble = True\n break\n if not does_have_equal_bubble:\n return False\n for bubble in self.hexagons:\n does_have_equal_bubble = False\n for other_bubble in other.hexagons:\n if bubble == other_bubble:\n does_have_equal_bubble = True\n break\n if not does_have_equal_bubble:\n return False\n return True\n return False", "def _compareObj(self, other):\n if not isinstance(other, GatePulsePair):\n raise 
Error.ArgumentError(f\"GatePulsePair object can not compare with a {type(other)}.\")\n if isinstance(other.cirLine.data, FixedGateOP) and isinstance(self.cirLine.data, RotationGateOP):\n return False\n if isinstance(other.cirLine.data, RotationGateOP) and isinstance(self.cirLine.data, FixedGateOP):\n return False\n if other.cirLine.data.name != self.cirLine.data.name:\n return False\n if other.cirLine.qRegIndexList != self.cirLine.qRegIndexList:\n return False\n if isinstance(self.cirLine.data, RotationGateOP):\n argLen = len(other.cirLine.data.uGateArgumentList)\n for idx in range(argLen):\n verify = abs(other.cirLine.data.uGateArgumentList[idx] -\n self.cirLine.data.uGateArgumentList[idx])\n if verify > sys.float_info.epsilon:\n return False\n return True", "def connected(self, p, q):\n return self.find(p) == self.find(q)", "def connected(self, p, q):\n return self.find(p) == self.find(q)", "def connected(self, p, q):\n return self.find(p) == self.find(q)", "def _shape_compare(shape1, shape2):\n if len(shape1) != len(shape2):\n return False\n for s1, s2 in zip(shape1, shape2):\n if s1 != s2:\n return False\n return True", "def is_different_server(self, other):\n return self.command_port != other.command_port", "def equals(self, other):\n if not isinstance(other, PermutationGroup):\n return False\n\n set_self_gens = set(self.generators)\n set_other_gens = set(other.generators)\n\n # before reaching the general case there are also certain\n # optimisation and obvious cases requiring less or no actual\n # computation.\n if set_self_gens == set_other_gens:\n return True\n\n # in the most general case it will check that each generator of\n # one group belongs to the other PermutationGroup and vice-versa\n for gen1 in set_self_gens:\n if not other.contains(gen1):\n return False\n for gen2 in set_other_gens:\n if not self.contains(gen2):\n return False\n return True", "def compare(obj_a, obj_b):\n\n return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) == 'NVC')", "def isComrade(self, other): # are the pieces comrades ?\r\n \r\n if self.name == other.name: \r\n return True\r\n else:\r\n return False", "def are_equal(self, sp1, sp2):\n set1 = set(sp1.elements)\n set2 = set(sp2.elements)\n return set1.issubset(set2) or set2.issubset(set1)", "def __eq__(self, other):\n return self.storage_ip == other.storage_ip and self.client_port == other.client_port and self.command_port == other.command_port and self.files == other.files", "def __eq__(self, other):\n return self.storage_ip == other.storage_ip and self.client_port == other.client_port", "def comp(p1,p2,adj,perm):\r\n #degree of p1\r\n f1 = 0\r\n #degree of p2\r\n f2 = 0\r\n \r\n #compute the degrees\r\n for i in range(m):\r\n if (V[p1],V[i]) in adj or (V[i],V[p1]) in adj:\r\n f1 += 1\r\n\r\n for i in range(m):\r\n if (V[p2],V[i]) in adj or (V[i],V[p2]) in adj:\r\n f2 += 1\r\n \r\n if f2 > f1:\r\n return True\r\n else:\r\n return False", "def is_potential_group(self, player: int, row: int, col: int, row_diff: int, col_diff: int):\n opponent = 1 - player\n for _ in range(4):\n square = Square(row, col)\n if not self.is_valid(square):\n return False\n if self.state[opponent][row][col]:\n # If there is a token that belongs to the opponent in this group,\n # then this group is not a potential group that belongs to the given player.\n return False\n row, col = row + row_diff, col + col_diff\n return True", "def isClientMultiplexingInterface(self):\n adaptation = self.getServerAdaptationFunction()\n if adaptation == None:\n return False 
# no adaptatation underneath\n else:\n clientcount = adaptation.getClientCount() # max. number of clients; None means unlimited\n return (clientcount != 1)", "def is_component_to_component_message(self) -> bool:\n return self.is_to_public_id and self.is_sender_public_id", "def __eq__(self, other: 'GatewayPortIdentity') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "def comm_group(self):\n return self._gcomm", "def isparallel(p1, p2, tol=10*_eps):\n \n return np.linalg.norm(np.cross(p1.w, p2.w) ) < tol", "def is_connected(self, p, q):\n self._validate(p)\n self._validate(q)\n return self.find(p) == self.find(q)", "def equivalent(kls, first, second):\n if first.empty() and second.empty():\n return True\n elif first.vertices.shape[0] != second.vertices.shape[0]:\n return False\n elif first.edges.shape[0] != second.edges.shape[0]:\n return False\n\n EPSILON = 1e-7\n\n vertex1, ct1 = np.unique(first.vertices, axis=0, return_counts=True)\n vertex2, ct2 = np.unique(second.vertices, axis=0, return_counts=True)\n \n vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)\n ct_match = np.all(ct1 == ct2)\n if not (vertex_match and ct_match):\n return False\n\n g1 = nx.Graph()\n g1.add_edges_from(first.edges)\n g2 = nx.Graph()\n g2.add_edges_from(second.edges)\n edges_match = nx.is_isomorphic(g1, g2)\n del g1 \n del g2\n\n if not edges_match:\n return False\n\n second_verts = {}\n for i, vert in enumerate(second.vertices):\n second_verts[tuple(vert)] = i\n \n attrs = [ attr['id'] for attr in first.extra_attributes ]\n for attr in attrs:\n buf1 = getattr(first, attr)\n buf2 = getattr(second, attr)\n if len(buf1) != len(buf2):\n return False\n\n for i in range(len(buf1)):\n i2 = second_verts[tuple(first.vertices[i])]\n if buf1[i] != buf2[i2]:\n return False\n\n return True", "def __eq__(self, other):\n if not isinstance(other, ShowServerGroupResult):\n return False\n\n return self.__dict__ == other.__dict__", "def connected(geo, stereo=True):\n return len(components_graph(geo, stereo=stereo)) == 1", "def __eq__(self, other):\n if not isinstance(other, MultiConcatInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if isinstance(other, CNPJ):\n return self.cnpj == other.cnpj\n return False", "def commutes_with(self, other):\n a = self.array_form\n b = other.array_form\n if len(a) != len(b):\n raise ValueError(\"The number of elements in the permutations \\\ndon\\'t match.\")\n for i in range(len(a)-1):\n if a[b[i]] != b[a[i]]:\n return False\n return True", "def __eq__(self, other):\n if not isinstance(other, CfbOddsGameBettingSplit):\n return False\n\n return self.__dict__ == other.__dict__", "def compare_topology(tree1, tree2):\n n2p1, n2p2 = ({node.name: node.parent.name\n for node in tree.traverse() if not node.is_root()}\n for tree in (tree1, tree2))\n return n2p1 == n2p2", "def has_interacted(self, other):\n if other in self._memory:\n if self.get_memory_of(other).has_interacted():\n return True\n else:\n return False\n else:\n return False", "def same_rank(self, other: 'Piece') -> bool:\n\n return self.rank == other.rank", "def similar(self, other):\r\n if self.rows == other.rows and self.columns == other.columns:\r\n return True\r\n else:\r\n return False", "def is_peered_with(self, other: SkupperSite) -> bool:\n if not self.cluster.peering:\n return False\n\n for c in self.cluster.peering.connections:\n if (\n isinstance(\n c,\n (\n ClusterPeeringConnectionClusterRequesterV1,\n 
ClusterPeeringConnectionClusterAccepterV1,\n ),\n )\n ) and c.cluster.name == other.cluster.name:\n return True\n return False", "def has_mpi(self):\n return bool(self.mpi_runner)", "def __eq__(self, other):\n return (type(self) == type(other) and\n (self.from_grid == other.from_grid) and\n (self.to_grid == other.to_grid))", "def __eq__(self, other):\n if isinstance(other, _DirectConnection):\n return self._callback_ref == other._callback_ref\n return False", "def isomorphic(self, p1, p2, same_nameness=False):\r\n\r\n if p1 is None and p2 is None:\r\n return True\r\n if p1 is None or p2 is None:\r\n return False\r\n\r\n if type(p1) is self.Atom and type(p2) is self.Atom:\r\n if p1.predicate == p2.predicate:\r\n if same_nameness:\r\n if p1.is_name == p2.is_name:\r\n return True\r\n return False\r\n return True\r\n return False\r\n if type(p1) is self.Atom or type(p2) is self.Atom:\r\n return False\r\n\r\n if p1.type == p2.type:\r\n return self.isomorphic(p1.v1, p2.v1) and self.isomorphic(p1.v2, p2.v2)\r\n return False", "def commutes_with(self, gate: PowerMatrixGate, atol: float = 1e-7) -> bool:\n from string import ascii_lowercase as alc, ascii_uppercase as auc\n\n # Check both gates have qubits\n if self.qubits is None or gate.qubits is None:\n raise ValueError(\"Cannot check commutation between virtual gates.\")\n\n # Get shared qubits\n shared_qubits = sort(set(self.qubits).intersection(gate.qubits))\n\n # If no qubits are shared, the gates definitely commute\n if not shared_qubits:\n return True\n\n # Rename\n g1, g2 = self, gate\n\n # Get all qubits\n q12 = tuple(sort(set(g1.qubits + g2.qubits)))\n\n # Get number of qubits\n n12 = len(q12)\n\n # Get unitaries\n U1 = np.reshape(g1.matrix(), (2,) * 2 * g1.n_qubits)\n U2 = np.reshape(g2.matrix(), (2,) * 2 * g2.n_qubits)\n\n # Define how to multiply matrices\n def _mul(w1, w2):\n # Get qubits and unitaries\n q1, U1 = w1\n q2, U2 = w2\n\n # Get number of qubits\n n1 = len(q1)\n n2 = len(q2)\n\n # Construct map\n _map = ''\n _map += ''.join(alc[q12.index(q)] for q in q1)\n _map += ''.join(auc[-shared_qubits.index(q) -\n 1 if q in shared_qubits else q12.index(q)]\n for q in q1)\n _map += ','\n _map += ''.join(auc[-shared_qubits.index(q) -\n 1] if q in shared_qubits else alc[q12.index(q)]\n for q in q2)\n _map += ''.join(auc[q12.index(q)] for q in q2)\n _map += '->'\n _map += ''.join(alc[x] for x in range(n12))\n _map += ''.join(auc[x] for x in range(n12))\n\n # Multiply map\n return np.einsum(_map, U1, U2)\n\n # Compute products\n P1 = _mul((g1.qubits, U1), (g2.qubits, U2))\n P2 = _mul((g2.qubits, U2), (g1.qubits, U1))\n\n # Check if the same\n return np.allclose(P1, P2, atol=1e-5)", "def _cmp_(self, other):\n if(not isinstance(other, VVHarmonicWeakMaassForms)):\n return False\n eq = (self.multiplier() == other.WR) and (self._weight_rat == other._weight_rat)\n eq = eq and (self.prec == other.prec) and (self._sym_type == other._sym_type)\n eq = eq and (self._is_dual_rep == other._is_dual_rep)\n return eq", "def __or__(p1, p2):\n return p1.isparallel(p2)", "def __eq__(self, other):\n if not isinstance(other, SharedContainerPort):\n return False\n\n return self.to_dict() == other.to_dict()", "def getSyncFor (self, conn) :\r\n for pw, _conn in self.clients :\r\n if _conn and _conn.getSyncInfo(conn) :\r\n self.ongoing_sync_count += 1\r\n return True\r\n \r\n return False", "def qf_connected(self, p, q):\n return self.id[p] == self.id[q]\n #return self.id[p] == self.id[q]", "def is_consistent_with(self, target):\n same_parent = self.parent() == 
target.parent()\n # Note FP. Is it really required to have the\n # same parent? Inclusion of all proc may be enough?\n return npw.equal(self.shape, target.shape).all() and same_parent", "def is_identical(self, other):\n if self.user != other.user:\n return False\n\n my_xputs = itertools.chain(self.inputs.order_by(\"dataset_idx\"), self.outputs.order_by(\"dataset_idx\"))\n other_xputs = itertools.chain(other.inputs.order_by(\"dataset_idx\"), other.outputs.order_by(\"dataset_idx\"))\n for my_xput, other_xput in zipper(my_xputs, other_xputs, fillvalue=None):\n if my_xput is None or other_xput is None or not my_xput.is_identical(other_xput):\n return False\n return True", "def is_synchronized(self):\n up = self.upper_binary_tree()\n down = self.lower_binary_tree()\n return down.canopee() == up.canopee()", "def __eq__(self, other):\n return isinstance(other, type(self)) and (self.data_store, self.unique_id) == (\n other.data_store,\n other.unique_id,\n )", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid)", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid)", "def compare(self, x, y):\n return (self.ordering[x][y] is True) or (x == y)", "def is_connected(object_one, object_two):\n\n for vert_one in object_one.Vertexes:\n for vert_two in object_two.Vertexes:\n if (vert_one.X == vert_two.X) and (vert_one.y == vert_two.y):\n return True\n\n return False", "def is_similar_with(self, other):\n\n # corresponding angles are congruent\n if self.angles != other.angles:\n return False\n # corresponding sides are proportional\n proportion = self.perimeter() / other.perimeter()\n for i in range(len(self.lengths)):\n if self.lengths[i]/other.lengths[i] != proportion:\n return False\n return True", "def grid_equal(grid1, grid2):\r\n for i in range(len(grid1)):\r\n for j in range(len(grid1[i])):\r\n if grid1[i][j] != grid2[i][j]:\r\n return False\r\n return True", "def same_player(self, other):\n return self.name == other.name \\\n and self.color == other.color", "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n return self.mesh == other.mesh and \\\n npw.equal(self.shape, other.shape).all() and \\\n self.domain == other.domain", "def are_connected(self, node1, node2):\n return bool( self.get_edge(node1, node2) )", "def IsSimilar(self,other):\n self.__do_essential_memebers_exist__()\n other.__do_essential_memebers_exist__()\n return self.element_type == other.element_type", "def qf_connected(self, p, q):\n return self.id[p] == self.id[q]", "def is_connected(self, node1, node2):\r\n\r\n return node1 in self.graph and node2 in self.graph[node1]", "def __ne__(self, other):\n if not isinstance(other, SharedContainerPort):\n return True\n\n return self.to_dict() != other.to_dict()", "def isdisjoint(self, other):\n self._check_title(other)\n\n # sort by top-left vertex\n if self.bounds > other.bounds:\n i = self\n self = other\n other = i\n\n return (self.max_col, self.max_row) < (other.min_col, other.max_row)", "def collides(self, other):\r\n for block in self.blocks:\r\n for obstacle in other.blocks:\r\n if block.col == obstacle.col and block.row == obstacle.row:\r\n return True\r\n return False", "def __eq__(self, other):\n if not isinstance(other, IQueryUserPartnerCouponsResultV2):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return (type(self) == type(other) 
and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid and\n self.m == other.m and\n self.n == other.n)", "def __eq__(self, other):\n if not isinstance(other, ConversationParticipant):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.74933475", "0.61300033", "0.6039855", "0.59951395", "0.5934048", "0.58377117", "0.5620288", "0.55401844", "0.5411162", "0.5404919", "0.53369236", "0.5305763", "0.53046554", "0.52937305", "0.52503824", "0.52165604", "0.52162445", "0.51967853", "0.51824045", "0.5172295", "0.516735", "0.51657116", "0.51644564", "0.51506144", "0.5148348", "0.5147662", "0.5116553", "0.50939196", "0.5057151", "0.5055618", "0.50544834", "0.5047489", "0.50289315", "0.5024735", "0.5016658", "0.5001497", "0.50013345", "0.49961257", "0.49961257", "0.49961257", "0.49890217", "0.4984423", "0.495956", "0.49490014", "0.49336174", "0.4923855", "0.49213678", "0.49095958", "0.48860386", "0.4880225", "0.4879903", "0.48763764", "0.48712692", "0.48660657", "0.48653594", "0.48566684", "0.48544303", "0.48448837", "0.48411024", "0.482487", "0.4820488", "0.48078638", "0.48074847", "0.48063445", "0.48014858", "0.47991124", "0.47956583", "0.47854698", "0.4785239", "0.47831583", "0.47785276", "0.47765264", "0.4769811", "0.47662485", "0.47648335", "0.47562122", "0.4752008", "0.47466585", "0.47411725", "0.4737583", "0.47375417", "0.4734204", "0.47326303", "0.47326303", "0.47321013", "0.4731577", "0.47279868", "0.4726314", "0.47261855", "0.47233304", "0.47214475", "0.4718654", "0.47141984", "0.4709559", "0.47095588", "0.4709084", "0.47087902", "0.4708764", "0.4705482", "0.47050893" ]
0.76038
0
Compare the groups of two MPI communicators. Returns True if both communicators handle the same group of MPI processes, regardless of communication context or rank order.
def compare_groups(comm_1, comm_2):
    assert comm_1 != MPI.COMM_NULL
    assert comm_2 != MPI.COMM_NULL
    result = MPI.Comm.Compare(comm_1, comm_2)
    res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]
    return result in res[:-1]
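For contrast with compare_comm above, a small sketch (again assuming mpi4py) where the group-level check accepts a duplicated communicator that the stricter context-level check rejects:

from mpi4py import MPI

world = MPI.COMM_WORLD
dup = world.Dup()  # same ranks, different context

assert compare_groups(world, dup)    # True: MPI.CONGRUENT is accepted here
assert not compare_comm(world, dup)  # False: contexts differ

dup.Free()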
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare_comm(comm_1, comm_2):\n assert comm_1 != MPI.COMM_NULL\n assert comm_2 != MPI.COMM_NULL\n result = MPI.Comm.Compare(comm_1, comm_2)\n res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]\n return result == res[0]", "def same_group(self,i,j):\n if self.group_number(i) == self.group_number(j):\n return True\n else:\n return False", "def is_parent(child, parent):\n # Get the list of processes\n assert child is not None\n assert parent is not None\n #child_ranks = [i for i in xrange(child.Get_size())]\n child_group = child.Get_group()\n parent_group = parent.Get_group()\n inter_group = MPI.Group.Intersect(child_group, parent_group)\n return child_group.Get_size() == inter_group.Get_size()", "def compare(self, other_group):\n x_bounds = self.bounding_box_x_len == other_group.bounding_box_x_len\n y_bounds = self.bounding_box_y_len == other_group.bounding_box_y_len\n same_num_cells = self.num_colored_cells == other_group.num_colored_cells\n if not (x_bounds and y_bounds and same_num_cells):\n return False\n for row_ind in range(len(other_group.cells)):\n for col_ind in range(len(other_group.cells[0])):\n if other_group.cells[row_ind][col_ind] != self.cells[row_ind][col_ind]:\n return False\n return True", "def has_mpi_peer_processes():\n return mpi4py_available and MPI.COMM_WORLD.Get_size() > 1", "def equals(self, other):\n if not isinstance(other, PermutationGroup):\n return False\n\n set_self_gens = set(self.generators)\n set_other_gens = set(other.generators)\n\n # before reaching the general case there are also certain\n # optimisation and obvious cases requiring less or no actual\n # computation.\n if set_self_gens == set_other_gens:\n return True\n\n # in the most general case it will check that each generator of\n # one group belongs to the other PermutationGroup and vice-versa\n for gen1 in set_self_gens:\n if not other.contains(gen1):\n return False\n for gen2 in set_other_gens:\n if not self.contains(gen2):\n return False\n return True", "def intersection_size(comm_1, comm_2):\n if comm_1 == MPI.COMM_NULL or comm_2 == MPI.COMM_NULL:\n return None\n group_1 = comm_1.Get_group()\n group_2 = comm_2.Get_group()\n inter_group = MPI.Group.Intersect(group_1, group_2)\n return inter_group.Get_size()", "def comm_group(self):\n return self._gcomm", "def _check_group(self):\n if len(self.groups) != 2:\n raise ValueError(\"There have to be two groups!\")\n\n # Check the number of atoms in each group is the same\n n_group1 = 0\n for key, value in self.groups[0].items():\n n_group1 += value\n\n n_group2 = 0\n for key, value in self.groups[1].items():\n n_group2 += value\n\n if n_group1 != n_group2:\n f1 = self._group2formula(self.groups[0])\n f2 = self._group2formula(self.groups[1])\n msg = \"The two groups have to have the same number of atoms.\\n\"\n msg += \"Group 1: {} Group 2: {}\".format(f1, f2)\n raise ValueError(msg)", "def check_for_group():\r\n if first_list[0][0] == second_list[0][0]:\r\n try:\r\n result = first_list[0][0], str(int(first_list[0][1]) + int(second_list[0][1]))\r\n except ValueError:\r\n result = first_list[0][0], str(float(first_list[0][1]) + float(second_list[0][1]))\r\n result_list.append(result)\r\n first_list.remove(first_list[0])\r\n second_list.remove(second_list[0])\r\n return True\r\n return False", "def can_communicate_with(self, target):\n if self == target:\n return True\n msg = 'You try to connect topologies belonging to'\n msg += ' two different mpi tasks. 
Set taskids properly or use'\n msg += ' InterBridge.'\n assert self.task_id() == target.task_id(), msg\n\n # Parent communicator\n # Todo : define some proper conditions for compatibility\n # between topo_from, topo_to and parent:\n # - same size\n # - same domain\n # - common processus ...\n # At the time we check that both topo have\n # the same comm_origin.\n return self.is_consistent_with(target)", "def __eq__(self, other):\n if not isinstance(other, MessageGroup):\n return False\n\n return self.__dict__ == other.__dict__", "def hybrid_mpi_omp(self):\n return self.has_omp and self.has_mpi", "def _compare(smi1, smi2):\n return _canonicalize(smi1) == _canonicalize(smi2)", "def is_potential_group(self, player: int, row: int, col: int, row_diff: int, col_diff: int):\n opponent = 1 - player\n for _ in range(4):\n square = Square(row, col)\n if not self.is_valid(square):\n return False\n if self.state[opponent][row][col]:\n # If there is a token that belongs to the opponent in this group,\n # then this group is not a potential group that belongs to the given player.\n return False\n row, col = row + row_diff, col + col_diff\n return True", "def _compare_groups_and_labels(self, groups, labels):\n # function that compares two lists without taking into account the order\n def comp_lists(l1, l2):\n len_match = len(l1) == len(l2)\n return len_match and np.all([g1 == g2 for g1, g2 in zip(l1, l2)])\n\n # comparison of the given groups\n groups_same = comp_lists(groups, self.selected_values['group_keys'])\n\n # if groups are the same, then compare the labels\n if groups_same:\n len_match = len(labels) == len(self.selected_values['labels_keys'])\n tmp = [comp_lists(g1, g2)\n for g1, g2 in zip(labels,\n self.selected_values['labels_keys'])]\n return len_match and np.all(tmp)\n else:\n return False", "def group_diff(options, db):\n nested_rvals = []\n for ip in options.gmp:\n nested_rvals.append(get_ip_parents(ip, db))\n # get just the list of groups, stripping out the networks.\n group1 = [x[0] for x in nested_rvals[0]]\n group2 = [x[0] for x in nested_rvals[1]]\n common = sorted(list(set(group1) & set(group2)))\n diff1 = sorted(list(set(group1) - set(group2)))\n diff2 = sorted(list(set(group2) - set(group1)))\n return common, diff1, diff2", "def check_comm(instance):\n\n comm = instance.__dict__.get(\"commodity\")\n\n accounted_comm = set()\n\n for c in comm:\n\n for r in instance.reactions:\n r_dict = r.__dict__\n\n for label, species in r_dict.items():\n\n if instance.__dict__.get(\"recombination\") == Recomb_1:\n product = r_dict.get(\"left2\")\n\n else:\n product = r_dict.get(\"right2\")\n\n if product == c:\n accounted_comm.add(c)\n\n if set(comm) == accounted_comm:\n return True\n else:\n print(\"Commodity:\", set(comm))\n print(\"Commodity products made:\", accounted_comm)\n return False", "def basic_compare(self, other: \"Molecule\") -> bool:\n return self.inchi_key[:14] == other.inchi_key[:14]", "def __eq__(self, other):\n if not isinstance(other, ShowServerGroupResult):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n group = self.group\n if not isinstance(other, group.dtype):\n return False\n return tuple.__eq__(self, other)", "def __gt__(self, other):\n if other.groupnumber > self.groupnumber:\n return True\n else:\n return False", "def is_group(self):\n return self._is_group", "def comaIsSymmetric(self):\n\t\tfor i in range(2*self.totalBins):\n\t\t\tfor j in range(2*self.totalBins):\n\t\t\t\tif not self.coma[i,j] == self.coma[j,i]:\n\t\t\t\t\tprint 
i,j,self.coma[i,j],self.coma[j,i]\n\t\t\t\t\treturn False\n\t\treturn True", "def is_converged(clusters1, clusters2, k, num_of_cords):\r\n for i in range(k):\r\n for j in range(num_of_cords):\r\n if clusters1[i][j] != clusters2[i][j]:\r\n return False\r\n return True", "def mpi_procs(self):\n return self._mpi_procs", "def __eq__(self, other):\n for ls, lo in zip(self.leaderboard_names, other.leaderboard_names):\n if ls != lo:\n return False\n for ls, lo in zip(self.leaderboard_groups, other.leaderboard_groups):\n if ls != lo:\n return False\n if self.top_left != other.top_left:\n return False\n if self.bottom_right != other.bottom_right:\n return False\n return True", "def __eq__(self, other):\n return self.conn == other.conn and self.p1 == other.p1 and self.p2 == other.p2", "def is_mpi_env():\n try:\n import mpi4py\n except ImportError:\n return False\n\n try:\n import mpi4py.MPI\n except ImportError:\n return False\n\n if mpi4py.MPI.COMM_WORLD.size == 1 and mpi4py.MPI.COMM_WORLD.rank == 0:\n return False\n return True", "def __eq__(self, other):\n if not isinstance(other, IamDomainGroupAllOf):\n return False\n\n return self.to_dict() == other.to_dict()", "def grid_equal(grid1, grid2):\r\n for i in range(len(grid1)):\r\n for j in range(len(grid1[i])):\r\n if grid1[i][j] != grid2[i][j]:\r\n return False\r\n return True", "def check_if_equal(self, other):\n if self.get_time_left() == other.get_time_left():\n if len(self.balls) == len(other.balls) and len(self.hexagons) == len(other.hexagons):\n for player in self.players:\n does_have_equal_player = False\n for other_player in other.players:\n if player == other_player:\n does_have_equal_player = True\n break\n if not does_have_equal_player:\n return False\n for bubble in self.balls:\n does_have_equal_bubble = False\n for other_bubble in other.balls:\n if bubble == other_bubble:\n does_have_equal_bubble = True\n break\n if not does_have_equal_bubble:\n return False\n for bubble in self.hexagons:\n does_have_equal_bubble = False\n for other_bubble in other.hexagons:\n if bubble == other_bubble:\n does_have_equal_bubble = True\n break\n if not does_have_equal_bubble:\n return False\n return True\n return False", "def Mirrorprocs(p1, p2):\n return False", "def commutes_with(self, other):\n a = self.array_form\n b = other.array_form\n if len(a) != len(b):\n raise ValueError(\"The number of elements in the permutations \\\ndon\\'t match.\")\n for i in range(len(a)-1):\n if a[b[i]] != b[a[i]]:\n return False\n return True", "def _cmp_(self, other):\n return cmp(self.matrix(), other.matrix())", "def commutes_with(self, gate: PowerMatrixGate, atol: float = 1e-7) -> bool:\n from string import ascii_lowercase as alc, ascii_uppercase as auc\n\n # Check both gates have qubits\n if self.qubits is None or gate.qubits is None:\n raise ValueError(\"Cannot check commutation between virtual gates.\")\n\n # Get shared qubits\n shared_qubits = sort(set(self.qubits).intersection(gate.qubits))\n\n # If no qubits are shared, the gates definitely commute\n if not shared_qubits:\n return True\n\n # Rename\n g1, g2 = self, gate\n\n # Get all qubits\n q12 = tuple(sort(set(g1.qubits + g2.qubits)))\n\n # Get number of qubits\n n12 = len(q12)\n\n # Get unitaries\n U1 = np.reshape(g1.matrix(), (2,) * 2 * g1.n_qubits)\n U2 = np.reshape(g2.matrix(), (2,) * 2 * g2.n_qubits)\n\n # Define how to multiply matrices\n def _mul(w1, w2):\n # Get qubits and unitaries\n q1, U1 = w1\n q2, U2 = w2\n\n # Get number of qubits\n n1 = len(q1)\n n2 = len(q2)\n\n # Construct map\n _map = 
''\n _map += ''.join(alc[q12.index(q)] for q in q1)\n _map += ''.join(auc[-shared_qubits.index(q) -\n 1 if q in shared_qubits else q12.index(q)]\n for q in q1)\n _map += ','\n _map += ''.join(auc[-shared_qubits.index(q) -\n 1] if q in shared_qubits else alc[q12.index(q)]\n for q in q2)\n _map += ''.join(auc[q12.index(q)] for q in q2)\n _map += '->'\n _map += ''.join(alc[x] for x in range(n12))\n _map += ''.join(auc[x] for x in range(n12))\n\n # Multiply map\n return np.einsum(_map, U1, U2)\n\n # Compute products\n P1 = _mul((g1.qubits, U1), (g2.qubits, U2))\n P2 = _mul((g2.qubits, U2), (g1.qubits, U1))\n\n # Check if the same\n return np.allclose(P1, P2, atol=1e-5)", "def is_group(self, group_name):\n\n return group_name in self._group", "def mutexPropositions(prop1, prop2, mutexActions):\n for a1 in prop1.getProducers():\n for a2 in prop2.getProducers():\n if Pair(a1, a2) not in mutexActions:\n return False\n return True", "def g_minority_1_dev(by_grps):\n if by_grps[0][0]==by_grps[0][1]:\n print(\"Failed g_1dev_t2 -- small groups match\")\n return False\n \n cts = 0\n ctn = 0\n cto = 0\n big_letter= \"\"\n \n for item in by_grps[1]:\n if item==\"S\":\n cts+=1\n if item==\"N\":\n ctn+=1 \n if item==\"O\":\n cto+=1\n if(cts==4 or ctn==4 or cto ==4):\n pass\n else:\n print(\"Failed g_1dev_t2 -- no large group consistency\")\n return False\n \n if(cts==4):\n big_letter = \"S\"\n if(cto==4):\n big_letter = \"O\"\n if(ctn == 4):\n big_letter = \"N\"\n \n for item in by_grps[0]:\n if(item==big_letter):\n print(\"Faield g_1dev_t2 -- a small group member and large group letter are the same\")\n return False\n print(\"Confirmed g_1dev_t2 -- small group with 1 deviancy and large group are different\")\n return True", "def compareGrids(grid1, grid2):\n if axis_utils.areAxesIdentical(grid1.getLatitude(),\n grid2.getLatitude(), check_id=False)==False:\n return False\n if axis_utils.areAxesIdentical(grid1.getLongitude(),\n grid2.getLongitude(), check_id=False)==False:\n return False\n return True", "def test_by_group_no_messages_for_another_group(self):\n thread = self.create_thread()\n other_group = mommy.make('groups.Group')\n result = Thread.public.by_group(thread.group)\n self.assertNotIn(other_group, result)", "def same_rank(self, other: 'Piece') -> bool:\n\n return self.rank == other.rank", "def comm_times_group(ns, hosts):\n\n return run_on_hosts(hosts,\n '''python %sape/timings/communication/mpi_run_group.py \"%s\" %s'''%(\n ape_dir, ns, ' '.join(hosts)))", "def grouped(self) -> bool:\n return self._grouped", "def grouped(self) -> bool:\n return self._grouped", "def is_identical(self, other):\n return (self.compounddatatype == other.compounddatatype and\n self.min_row == other.min_row and\n self.max_row == other.max_row)", "def test_get_groups(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n \n group0.grant('Perm1', object0)\n group0.grant('Perm3', object1)\n group1.grant('Perm2', object1)\n \n self.assert_(group0 in get_groups(object0))\n self.assertFalse(group1 in get_groups(object0))\n self.assert_(group0 in get_groups(object1))\n self.assert_(group1 in get_groups(object1))\n self.assert_(len(get_groups(object1))==2)", "def pure_mpi(self):\n return self.has_mpi and not self.has_omp", "def __eq__(self, other):\n if not isinstance(other, ConsistencyGroup):\n return False\n\n return self.__dict__ == other.__dict__", "def test_groups(self):\n # Make a group and send to it\n channel_layer.group_add(\"tgroup\", \"tg_test\")\n 
channel_layer.group_add(\"tgroup\", \"tg_test2\")\n channel_layer.group_add(\"tgroup\", \"tg_test3\")\n channel_layer.group_discard(\"tgroup\", \"tg_test3\")\n channel_layer.send_group(\"tgroup\", {\"value\": \"orange\"})\n # Receive from the two channels in the group and ensure messages\n channel, message = channel_layer.receive_many([\"tg_test\"])\n self.assertEqual(channel, \"tg_test\")\n self.assertEqual(message, {\"value\": \"orange\"})\n channel, message = channel_layer.receive_many([\"tg_test2\"])\n self.assertEqual(channel, \"tg_test2\")\n self.assertEqual(message, {\"value\": \"orange\"})\n # Make sure another channel does not get a message\n channel, message = channel_layer.receive_many([\"tg_test3\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def grid_equal (grid1, grid2):\r\n for i in range (4):\r\n for j in range (4):\r\n if grid1[i][j] != grid2[i][j]:\r\n return False\r\n return True", "def grid_equal (grid1, grid2):\r\n if grid1 == grid2:\r\n return True\r\n return False", "def equivalent(kls, first, second):\n if first.empty() and second.empty():\n return True\n elif first.vertices.shape[0] != second.vertices.shape[0]:\n return False\n elif first.edges.shape[0] != second.edges.shape[0]:\n return False\n\n EPSILON = 1e-7\n\n vertex1, ct1 = np.unique(first.vertices, axis=0, return_counts=True)\n vertex2, ct2 = np.unique(second.vertices, axis=0, return_counts=True)\n \n vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)\n ct_match = np.all(ct1 == ct2)\n if not (vertex_match and ct_match):\n return False\n\n g1 = nx.Graph()\n g1.add_edges_from(first.edges)\n g2 = nx.Graph()\n g2.add_edges_from(second.edges)\n edges_match = nx.is_isomorphic(g1, g2)\n del g1 \n del g2\n\n if not edges_match:\n return False\n\n second_verts = {}\n for i, vert in enumerate(second.vertices):\n second_verts[tuple(vert)] = i\n \n attrs = [ attr['id'] for attr in first.extra_attributes ]\n for attr in attrs:\n buf1 = getattr(first, attr)\n buf2 = getattr(second, attr)\n if len(buf1) != len(buf2):\n return False\n\n for i in range(len(buf1)):\n i2 = second_verts[tuple(first.vertices[i])]\n if buf1[i] != buf2[i2]:\n return False\n\n return True", "def compare(self, other, enforce_mask=False, enforce_grid=False,\n enforce_area=False, enforce_aream=False, enforce_all=False):\n eps_mask = 1.0e-6\n eps_grid = 1.0e-2\n eps_area = 1.0e-1\n\n # Do a global gather to create a non-distributed attribute vector\n debugPrint( \"self.lgrid:\\n\",self.lgrid )\n debugPrint( \"other.lgrid:\\n\",other.lgrid )\n gGrid1 = attributevector.AttributeVector(self.ifields, self.rfields, self.lsize())\n gGrid1.initv(self.lgrid, self.lgrid.lsize())\n gGrid1.gather(self.lgrid, self.gsMap, comm.world_pe0, comm.component_pid, comm.local_comm) \n gGrid2 = attributevector.AttributeVector(other.ifields, other.rfields, other.lsize())\n gGrid2.initv( other.lgrid, other.lgrid.lsize() )\n gGrid2.gather(other.lgrid, self.gsMap,comm.world_pe0, comm.component_pid, comm.local_comm)\n\n # From here on, everything is done by the root pe\n if( comm.component_pid != comm.world_pe0 ):\n return\n\n # Compare size of domain\n npts1 = gGrid1.lsize()\n npts2 = gGrid2.lsize()\n npts = npts1\n\n if ( npts1 == npts2 ):\n debugPrint( \"the domain size is \",npts )\n else:\n debugPrint( \"domain size #1 = \", npts1 )\n debugPrint( \"domain size #2 = \", npts2 )\n debugPrint( \"ERROR: domain size mis-match\" )\n # call shr_sys_abort(subName // \"ERROR: domain size mis-match\")\n # Exceptions?\n\n # If there was no problem, 
continue:\n # Compare Domain masks:\n debugPrint(\"gData1:\\n\",gGrid1)\n debugPrint(\"gData2:\\n\",gGrid2)\n data1,data1_size = gGrid1.exportRAttr(\"mask\")#rcode)?\n data2,data2_size = gGrid2.exportRAttr(\"mask\")#rcode)?\n \n ndiff = 0\n debugPrint( \"npts:\",npts )\n debugPrint( \"length of data1:\",data1_size )\n for n in xrange(0,npts-1):\n if ( (( (abs(data1[n])) > eps_mask ) and (abs(data1[n]) < eps_mask )) or \n ( (( abs(data1[n])) < eps_mask ) and (( abs(data1[n])) > eps_mask) ) ):\n ndiff = ndiff + 1\n\n # Enforce consistency: \n # Nested function declaration\n def enforce_consistency(msg,exception=None):\n if (enforce_mask or enforce_all):\n if (ndiff > 0):\n debugPrint( msg )\n # Raise Exception\n \n enforce_consistency(\"ERROR: incompatible domain masks\")\n \n # Compute Maximum Latitude and Longitude Differences\n mask = data1\n ndiff = 0\n data1,data1_size = gGrid1.exportRAttr(\"lat\")#,rcode))\n data2,data2_size = gGrid2.exportRAttr(\"lat\")#,rcode))\n diff = 0\n max_diff = 0.0\n for n in xrange(npts):\n if( abs( mask[n] ) > eps_mask ):\n diff = abs( data1[n] - data2[n] )\n max_diff = max(max_diff, diff)\n if( diff > eps_grid ):\n ndiff = ndiff + 1\n debugPrint( \"Maximum latitude difference = \",max_diff )\n\n data1,data1_size = gGrid1.exportRAttr(\"lon\")#,rcode))\n data2,data2_size = gGrid2.exportRAttr(\"lon\")#,rcode))\n max_diff = 0.0\n\n for n in xrange(npts):\n if( abs( mask[n] ) > eps_mask ):\n x1 = data1[n]\n x2 = data2[n]\n if( x1 > x2 ): #make sure x1 < x2\n # swap(x1,x2)\n x1 = data2[n]\n x2 = data1[n]\n while( (x1+360.0) < (x2+180.0) ):#longitude is periodic\n x1 = x1 + 360.0\n diff = abs( x2 - x1 )\n max_diff = max(max_diff,diff)\n \n if (diff > eps_grid):\n ndiff = ndiff + 1\n debugPrint( \"Maximum longitude difference = \",max_diff )\n\n enforce_consistency(\"ERROR: incompatible domain grid coordinates!\")\n\n # Compare Area:\n data1,data1_size = gGrid1.exportRAttr( \"area\" )#, rcode )\n data2,data2_size = gGrid2.exportRAttr( \"area\" )#, rcode )\n\n ndiff = 0\n max_diff = 0.0\n\n for n in xrange(npts):\n if( abs( mask[n] ) > eps_mask ):\n if( data2[n] != 0.0 ):\n diff = abs( (data2[n] - data1[n]) / data2[n] )\n max_diff = max(max_diff,diff)\n if( diff > eps_area ):\n ndiff = ndiff + 1\n debugPrint( \"Maxium relative error of area (model) = \", max_diff )\n\n enforce_consistency(\"ERROR: icompatible domain area(model)\")\n\n # Compare aream\n data1,data1_size = gGrid1.exportRAttr(\"aream\")#,rcode))\n data2,data2_size = gGrid2.exportRAttr(\"aream\")#,rcode))\n\n ndiff = 0\n max_diff = 0.0\n for n in xrange(npts):\n if ( abs( mask[n] ) > eps_mask ):\n if( data2[n] != 0.0 ):\n diff = abs((data2[n] - data1[n])/data2[n])\n max_diff = max(max_diff,diff)\n if( diff > eps_area ):\n ndiff = ndiff + 1\n debugPrint( \"maximum relative error of area(map) = \",max_diff )\n\n enforce_consistency(\"ERROR: incompatible domain area (map)\")\n\n # Clean up, we're finished!\n return", "def grid_equal (grid1, grid2):\r\n if grid1 == grid2:\r\n return True\r\n else:\r\n return False", "def running_groups(self):\n return set(\n cmd.group_by for id, cmd in self.commands\n if cmd.is_running and cmd.group_by is not None\n )", "def __eq__(self, other):\n return np.all(self.grid == other.grid) and np.all(self.pos == other.pos)", "def _are_equal(grid: List[List[str]], other: List[List[str]]) -> bool:\n for row in range(len(grid)):\n for col in range(len(grid[row])):\n if grid[row][col] != other[row][col]:\n return False\n return True", "def make_comms(self,comm):\n # For masters we 
let child_comm be the communicator used to message the node's \n # children, and parent_comm be that used to message the node's parents.\n\n parent_rank = 0\n\n # Case (1)\n if self.num_masters > 1:\n self.make_comms_many(comm)\n if self.is_master:\n parent_comm = self.comm_masters\n if self.comm_masters.Get_rank() == 0: # rank 0 is the super-master\n child_comm = self.comm_masters\n parent_rank = None\n else:\n child_comm = self.comm_block\n # Case (2)\n else:\n self.make_comm_single(comm)\n if self.is_master:\n parent_comm = self.comm_block\n child_comm = self.comm_block\n parent_rank = None\n\n # Process initialization\n from .MPIProcess import MPIWorker, MPIMaster\n if self.is_master:\n self.set_val_data()\n num_sync_workers = self.get_num_sync_workers(child_comm)\n self.process = MPIMaster( parent_comm, parent_rank=parent_rank, \n data=self.data, child_comm=child_comm, num_epochs=self.num_epochs,\n num_sync_workers=num_sync_workers, callbacks=self.callbacks )\n else:\n self.set_train_data()\n self.process = MPIWorker( parent_comm=self.comm_block, parent_rank=parent_rank, \n num_epochs=self.num_epochs, data=self.data, callbacks=self.callbacks )", "def are_equal(self, sp1, sp2):\n set1 = set(sp1.elements)\n set2 = set(sp2.elements)\n return set1.issubset(set2) or set2.issubset(set1)", "def object_communicator():\n comm = MPI.COMM_WORLD", "def __eq__(self, other):\n if not isinstance(other, GroupModel):\n return False\n\n return self.__dict__ == other.__dict__", "def _exchange_ghosts_mpi(self):\n for d in xrange(self._dim):\n if d in self._cutdir_list:\n self._exchange_ghosts_mpi_d(d)\n else:\n self._exchange_ghosts_local_d(d)", "def _fake_message_compare(m1, m2):\r\n m1 = m1.serialize()\r\n m2 = m2.serialize()\r\n diff = False\r\n for i in range(len(m1)):\r\n if m1[i] is None:\r\n continue\r\n if m1[i] != m2[i]:\r\n diff = True\r\n break\r\n return not diff", "def are_equal(self, sp1, sp2):\n return True", "def check_mm_equal(g1, g2, mode = 0):\n assert isinstance(g1, (MM, MM0))\n assert isinstance(g2, (MM, MM0))\n g3 = np.zeros(2 * (g1.length + g2.length) + 1, dtype = np.uint32)\n status = mm_group_words_equ(g1._data, g1.length,\n g2._data, g2.length, g3)\n if status < 2:\n return not status\n\n v = get_order_vector().data\n w = mm_vector(15)\n work = mm_vector(15)\n mm_op15_copy(v, w)\n mm_op15_word(w, g3, status - 2, 1, work)\n return not mm_op15_compare(v, w)", "def grid_equal (grid1, grid2):\r\n s=0 \r\n for h in range(4):\r\n for m in range(4):\r\n if grid1[h][m]==grid2[h][m]:\r\n s+=1\r\n else:\r\n ()\r\n if s==16:\r\n return True\r\n else:\r\n return False", "def are_equal(self, sp1, sp2):\n return sp1 == sp2", "def __ne__(self, other):\n if not isinstance(other, IamDomainGroupAllOf):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\r\n return (type(self) == type(other) and\r\n other.broadcastable == self.broadcastable)", "def isIsosceles(self):\n\t\treturn self.a == self.b or self.a == self.c or self.b == self.c", "def get_groups(board: numpy.ndarray, player: int) -> List[Group]:\n # Generate couples\n # Array of (p1, p2, x) where x = -1 if p1 == p2, 0 if p1 and p2 are close and 1 if they are close\n couples = []\n size = board.shape[0]\n for i in range(1, size - 1):\n for j in range(1, size - 1):\n if board[i, j] == player:\n l0 = [(i + x, j + y) for x, y in NEIGHBORS_1]\n l1 = [(i + x, j + y) for x, y in NEIGHBORS_2]\n for p in l0 + l1 + [(i, j)]:\n corner = all([x in [0, size - 1] for x in p])\n if 0 <= p[0] < size and 0 <= p[1] < size 
and board[p] == player and not corner:\n if p == (i, j):\n couples.append(((i, j), p, -1))\n elif p in l0:\n couples.append(((i, j), p, 0))\n else:\n p1, p2 = get_common_neighbours((i, j), p)\n if player not in [board[p1], board[p2]] and (board[p1] == -1 and board[p2] == -1):\n couples.append(((i, j), p, 1))\n\n # Group couples\n groups = [[k] for k in couples]\n\n def fusion(f_groups):\n for group1 in f_groups:\n for group2 in f_groups:\n if group1 != group2:\n for c1 in group1:\n for c2 in group2:\n if c1[0] == c2[0] or c1[0] == c2[1] or c1[1] == c2[0] or c1[1] == c2[1]:\n group1.extend(group2)\n f_groups.remove(group2)\n return True\n return False\n\n while fusion(groups):\n pass\n\n return groups", "def __lt__(self, other):\n if self.groupnumber < other.groupnumber:\n return True\n else:\n return False", "def e_paralelo(self, other):\n if (self == other) or (self.normaliza() == other.normaliza()):\n return True\n else:\n return False", "def is_identical(self, other):\n if self.user != other.user:\n return False\n\n my_xputs = itertools.chain(self.inputs.order_by(\"dataset_idx\"), self.outputs.order_by(\"dataset_idx\"))\n other_xputs = itertools.chain(other.inputs.order_by(\"dataset_idx\"), other.outputs.order_by(\"dataset_idx\"))\n for my_xput, other_xput in zipper(my_xputs, other_xputs, fillvalue=None):\n if my_xput is None or other_xput is None or not my_xput.is_identical(other_xput):\n return False\n return True", "def is_equal(p1,p2):\r\n return set(p1)==set(p2)", "def comp(p1,p2,adj,perm):\r\n #degree of p1\r\n f1 = 0\r\n #degree of p2\r\n f2 = 0\r\n \r\n #compute the degrees\r\n for i in range(m):\r\n if (V[p1],V[i]) in adj or (V[i],V[p1]) in adj:\r\n f1 += 1\r\n\r\n for i in range(m):\r\n if (V[p2],V[i]) in adj or (V[i],V[p2]) in adj:\r\n f2 += 1\r\n \r\n if f2 > f1:\r\n return True\r\n else:\r\n return False", "def compute_cluster_similarities(emb_clusters1, emb_clusters2, compare, order, clmethod, plot):\n def compute_sim(e, e1, cls, cls1):\n sims = np.empty((20, 20))\n xticks, yticks = [], []\n for i, c in enumerate(cls):\n yticks.append(', '.join(c[1]) + (f' {round(c[3], 5)}' if order == 'avgfreq' else ''))\n for j, c1 in enumerate(cls1):\n if len(xticks) < 20:\n xticks.append(', '.join(c1[1]) + (f' {round(c1[3], 5)}' if order == 'avgfreq' else ''))\n sims[i, j] = jaccard_similarity_score(c[2], c1[2])\n jaccard_similarities[f'{e}-{e1}'] = sims\n\n if plot:\n if order == 'clustermap':\n similarity_clustermap(sims, xticks, yticks, f'{e}-{e1}_{clmethod}')\n elif order == 'default' or order == 'avgfreq':\n similarity_heatmap(sims, xticks, yticks, f'{e}-{e1}_{clmethod}', order)\n else:\n pass\n\n jaccard_similarities = {}\n if compare == 'cross':\n for ie, (e, cls) in enumerate(emb_clusters1.items()):\n for ie1, (e1, cls1) in enumerate(emb_clusters2.items()):\n if ie < ie1:\n compute_sim(e, e1, cls, cls1)\n elif compare == 'dot':\n for (e, cls), (e1, cls1) in zip(emb_clusters1.items(), emb_clusters2.items()):\n compute_sim(e, e1, cls, cls1)\n\n return jaccard_similarities", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.n == other.n and self.m == other.m and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid)", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid and\n self.m == other.m and\n self.n == other.n)", "def is_group(id):\n return id.startswith('G')", "def are_clone_sequences(atoms1, atoms2):\n\n for a1, a2 in it.zip_longest(atoms1, 
atoms2):\n assert a1 is not a2\n assert a1.get_id() == a2.get_id()\n assert a1.get_charge() == a2.get_charge()\n assert a1.__class__ is a2.__class__", "def test_if_nodes_are_in_same_chunk(self, node_ids: Sequence[np.uint64]\n ) -> bool:\n assert len(node_ids) == 2\n return self.get_chunk_id(node_id=node_ids[0]) == \\\n self.get_chunk_id(node_id=node_ids[1])", "def __eq__(self, other):\n parent_same = self.parent1.rid == other.parent1.rid \\\n and self.parent2.rid == other.parent2.rid\n\n parents_opposite = self.parent2.rid == other.parent1.rid \\\n and self.parent1.rid == other.parent2.rid\n\n return parent_same or parents_opposite", "def balance_similar_node_groups(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"balance_similar_node_groups\")", "def can_use_mpi_pool():\n return ALLOW_SPAWN or ALREADY_RUNNING_AS_MPI", "def isComrade(self, other): # are the pieces comrades ?\r\n \r\n if self.name == other.name: \r\n return True\r\n else:\r\n return False", "def are_consecutive(card1, card2, ranks=13) -> bool:\n assert card1 != card2\n if card1 < card2:\n p = card1\n n = card2\n else:\n p = card2\n n = card1\n if p.suit != n.suit:\n return False\n if n.rank == 1:\n if p.rank == ranks:\n return True\n else:\n if n.rank - p.rank == 1:\n return True\n return False", "def __eq__(self, other):\n return isinstance(other, type(self)) and set(self.channels) == set(other.channels)", "def _make_comms(n_spokes, fullcomm=None):\n if not haveMPI:\n raise RuntimeError(\"make_comms called, but cannot import mpi4py\")\n # Ensure that the proper number of processes have been invoked\n nsp1 = n_spokes + 1 # Add 1 for the hub\n if fullcomm is None:\n fullcomm = MPI.COMM_WORLD\n n_proc = fullcomm.Get_size() \n if n_proc % nsp1 != 0:\n raise RuntimeError(f\"Need a multiple of {nsp1} processes (got {n_proc})\")\n\n # Create the strata_comm and cylinder_comm\n # Cryptic comment: intra is vertical, inter is around the hub\n global_rank = fullcomm.Get_rank()\n strata_comm = fullcomm.Split(key=global_rank, color=global_rank // nsp1)\n cylinder_comm = fullcomm.Split(key=global_rank, color=global_rank % nsp1)\n return strata_comm, cylinder_comm", "def check_for_isomorphism(graph1: list, graph2: list, directed=False) -> bool:\n matrix1 = get_adjancy_matrix(graph1, directed)\n matrix2 = get_adjancy_matrix(graph2, directed)\n\n if num_vertices(matrix1, matrix2):\n if num_edges(matrix1, matrix2):\n degrees = vertices_degree(matrix1, matrix2)\n if degrees[0]:\n return permutations(matrix1, matrix2, degrees[1:])\n return False", "def isparallel(p1, p2, tol=10*_eps):\n \n return np.linalg.norm(np.cross(p1.w, p2.w) ) < tol", "def __le__(self, other):\n if type(self) is not type(other) or len(self) != len(other):\n raise TypeError(\"these are not comparable\")\n if self.runs == other.runs:\n return True\n\n # r1 must have less runs than r0\n if len(other.runs) > len(self.runs):\n return False\n\n dico1 = other.run_indices\n\n # conversion: index of run in r0 -> index of run in r1\n dico0 = [None] * len(self.runs)\n for i, bloc in enumerate(self.runs):\n j0 = dico1[bloc[0]]\n for k in bloc:\n if dico1[k] != j0:\n return False\n dico0[i] = j0\n\n # at this point, the set partitions given by tuples are comparable\n dg0 = self.spg\n dg1 = other.dpg\n\n for i, j in dg0.edge_iterator(labels=False):\n if dico0[i] != dico0[j] and not dg1.has_edge(dico0[i], dico0[j]):\n return False\n return True", "def share_with_group_lock(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"share_with_group_lock\")", "def 
_shape_compare(shape1, shape2):\n if len(shape1) != len(shape2):\n return False\n for s1, s2 in zip(shape1, shape2):\n if s1 != s2:\n return False\n return True", "def valid_merge( group1, group2, target_dir, max_list_size, split_toplevel=True ):\n if len( group1 ) <= 0 or len( group2 ) <= 0:\n return True\n if ( lsize( group1 ) + lsize( group2 ) ) <= max_list_size:\n return ( not split_toplevel ) or toplevel_subdir( group1[0].path, target_dir ) == toplevel_subdir( group2[0].path, target_dir )\n return False", "def equal_multiset(vec_1, vec_2):\n lst_1, lst_2 = sorted(list(vec_1)), sorted(list(vec_2))\n if len(lst_1) != len(lst_2):\n return False\n for i, j in list(zip(lst_1, lst_2)):\n if i != j:\n return False\n return True", "def are_equal(self, sp1, sp2):\n return", "def test_equality(self):\n\n for name in TEST_NAMES:\n self.colorspace.setEqualityGroup(name)\n self.assertEqual(name, self.colorspace.getEqualityGroup())", "def compare_topology(tree1, tree2):\n n2p1, n2p2 = ({node.name: node.parent.name\n for node in tree.traverse() if not node.is_root()}\n for tree in (tree1, tree2))\n return n2p1 == n2p2" ]
[ "0.7372509", "0.61725867", "0.60191995", "0.597187", "0.5756517", "0.56709945", "0.5612317", "0.5581606", "0.5556505", "0.5444479", "0.5426828", "0.541855", "0.5415436", "0.54028773", "0.53394014", "0.52934015", "0.52109265", "0.519534", "0.5183995", "0.51758873", "0.506649", "0.50646776", "0.50601214", "0.5037635", "0.5022083", "0.50203854", "0.5006094", "0.4997834", "0.49949437", "0.4984624", "0.49797893", "0.4974044", "0.49665177", "0.49624366", "0.4961118", "0.4959956", "0.4957982", "0.49530417", "0.49442965", "0.49431378", "0.49386173", "0.49365848", "0.49359322", "0.49280807", "0.49280807", "0.4926458", "0.49255738", "0.49217096", "0.49141175", "0.49050733", "0.4902339", "0.48966345", "0.4894724", "0.48926112", "0.4889912", "0.48873037", "0.48856932", "0.48732495", "0.4864659", "0.4862405", "0.48593968", "0.4859183", "0.48571438", "0.48539957", "0.4850816", "0.48461938", "0.4821769", "0.48110235", "0.48033887", "0.48027804", "0.48020104", "0.48014146", "0.4800456", "0.4795807", "0.4786473", "0.4781825", "0.47784892", "0.4773457", "0.47724274", "0.4769003", "0.4761491", "0.47607464", "0.47527856", "0.47524473", "0.4751486", "0.47389203", "0.47369957", "0.473271", "0.47321284", "0.47247702", "0.4723781", "0.472188", "0.4718576", "0.47152922", "0.4712176", "0.47119105", "0.47086984", "0.47038856", "0.47013193", "0.47009924" ]
0.8524684
0
Find the values of ranks in target from ranks in source.
def convert_ranks(source, target): assert source != MPI.COMM_NULL and target != MPI.COMM_NULL g_source = source.Get_group() g_target = target.Get_group() size_source = g_source.Get_size() r_source = [i for i in xrange(size_source)] res = MPI.Group.Translate_ranks(g_source, r_source, g_target) return {r_source[i]: res[i] for i in xrange(size_source)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findRanks(toBeRanked, values):\n\treturn list(map(lambda e: findRank(e, values), toBeRanked))", "def rank_results(result_index, source2target):\n result2rank = defaultdict(lambda: [])\n for term, targets in result_index.items():\n ranked = sorted(targets, key=lambda tup: tup[1], reverse=True)\n ranks = rankdata([t[1] for t in ranked], method='min').tolist()\n ranks.reverse()\n for index, target in enumerate(ranked):\n if target[0] in source2target[term]:\n result2rank[term].append(ranks[index])\n return result2rank", "def find_dst_value2(target: int, targets: list):\n targets.sort()\n i, j = 0, len(targets)-1\n while i < j:\n left_value = targets[i]\n right_value = targets[j]\n if left_value + right_value == target:\n return left_value, right_value\n if left_value + right_value > target:\n j -= 1\n elif left_value + right_value < target:\n i += 1", "def findClosestNodes(self, target: hash.hash.Hash):\n # TODO: make more efficient\n # See: http://stackoverflow.com/questions/30654398/implementing-find-node-on-torrent-kademlia-routing-table\n \n nodes = []\n \n for bucket in self.buckets:\n nodes = nodes + bucket.nodes\n\n nodes.sort(key=lambda x: nodes.distanceToHash(targetHash))\n\n return nodes[:config.K]", "def findSimilarityScore(self, source, destination):\n\n\n if self.similarityScores is not None:\n return self.similarityScores[source][destination]\n\n # Project graph (if a meta path was provided)\n if self.metaPath is None:\n projectedGraph = self.graph\n else:\n if self.metaPath[0] == self.metaPath[-1]: # Homogeneous projection?\n projectedGraph = self.metaPathUtility.createHomogeneousProjection(self.graph, self.metaPath)\n else:\n projectedGraph = self.metaPathUtility.createHeterogeneousProjection(self.graph, self.metaPath)\n\n # Build initial similarity scores\n self.similarityScores = defaultdict(dict)\n nodes = self.graph.getNodes()\n for a, b in itertools.product(nodes, nodes):\n self.similarityScores[a][b] = 1 if a is b else 0\n\n self.similarityScores = self.__simRank(projectedGraph, self.similarityScores, SimRankStrategy.k)\n\n return self.similarityScores[source][destination]", "def _map_dims_(\n cls,\n source_array: List[int],\n target_array: List[int],\n source_idx: int,\n start_target_idx: int,\n source_to_target_map: DIMENSION_MAP,\n target_to_source_map: DIMENSION_MAP,\n ) -> Tuple[bool, int]:\n res, last_target_index = cls._can_reach_number_by_multiply(\n number_to_reach=source_array[source_idx], array=target_array, start_idx=start_target_idx\n )\n if not res:\n return (res, last_target_index)\n source_to_target_map[source_idx] = list(range(start_target_idx, last_target_index + 1))\n for idx in range(start_target_idx, last_target_index + 1):\n target_to_source_map[idx] = [source_idx]\n return (res, last_target_index)", "def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores", "def _gen_matches(target_units, source_units, stoplist_set, features_size):\n for hits2positions in gen_hits2positions(\n target_units, source_units, stoplist_set, features_size):\n overhits2positions = {\n k: np.array(v) for k, v in hits2positions.items()\n if len(v) >= 2}\n for (t_ind, s_ind), positions in overhits2positions.items():\n yield (t_ind, s_ind, positions)", "def sources(self):\n if self.rank < self.midpoint:\n 
partner = self.midpoint + (self.rank - self.left)\n if self.rank == self.midpoint - 1 and partner == self.right:\n partners = set()\n elif self.rank == self.midpoint - 1 and partner == self.right - 2:\n partners = {partner, partner + 1}\n else:\n partners = {partner}\n else:\n partner = self.left + (self.rank - self.midpoint)\n if self.rank == self.right - 1 and partner == self.midpoint:\n partners = set()\n elif self.rank == self.right - 1 and partner == self.midpoint - 2:\n partners = {partner, partner + 1}\n else:\n partners = {partner}\n\n return partners", "def two_sum(self, nums, target):\n\n # idea: for each num, check if it complements a previously seen one\n # (keeping track of them in a dictionary)\n seek = {}\n\n for ind, element in enumerate(nums):\n if element in seek:\n return [seek[element], ind]\n else:\n seek[target - element] = ind\n\n return []", "def twoSum(self, nums: List[int], target: int) -> List[int]:\n # Use a dict to record visited numbers\n d = {}\n for i, n in enumerate(nums):\n m = target - n\n if m in d:\n return [d[m], i]\n else:\n d[n] = i", "def sinks(self):\n if self.rank < self.midpoint:\n partner = self.midpoint + (self.rank - self.left)\n if partner == self.right:\n partner -= 1\n else:\n partner = self.left + (self.rank - self.midpoint)\n if partner == self.midpoint:\n partner -= 1\n\n return {partner}", "def as_paired_ranks(x, y):\n n = len(x)\n paired = zip(x,y)\n x = list(x)\n y = list(y)\n x.sort()\n y.sort()\n rank_val_map_x = dict(zip(x, range(n)))\n rank_val_map_y = dict(zip(y, range(n)))\n ranked = []\n for i in range(n):\n ranked += [[rank_val_map_x[paired[i][0]], rank_val_map_y[paired[i][1]]]]\n return ranked", "def similarity(self, source, target):\n results = { m.name: m.similarity(source, target) for m in self.metrics }\n return results", "def get_targets(\n self, source: Tuple[str, str], relation: Optional[str] = None\n ) -> List[Node]:\n return self.get_common_targets([source], relation)", "def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = two_way_skar(d, [source, target], others)\n return uniques", "def searchRange4(self, nums: List[int], target: int) -> List[int]:\n def bisearch_l() -> int:\n i = -1\n l, r = 0, len(nums) - 1\n while l <= r:\n m = (l + r) // 2\n if nums[m] >= target:\n r = m - 1\n else:\n l = m + 1\n \n if nums[m] == target:\n i = m\n \n return i\n\n def bisearch_r() -> int:\n i = -1\n l, r = 0, len(nums) - 1\n while l <= r:\n m = (l + r) // 2\n if nums[m] > target:\n r = m - 1\n else:\n l = m + 1\n \n if nums[m] == target:\n i = m\n \n return i\n\n return [bisearch_l(), bisearch_r()]", "def find_targetnodes(self):\n\n self.connect_backwards()\n\n targetnodes = []\n for n in self.find_datanodes():\n if len(n.receives_from) > 0:\n targetnodes.append(n)\n return targetnodes", "def target_nodes_indexes(self) -> _TargetNodes:\n return self.__target_nodes_indexes", "def getResult(targets, i=None):", "def ranking_loss(scores, targets):\n costs = targets[1]\n true_ants = targets[2]\n weights = targets[4] if len(targets) == 5 else None\n true_ant_score = torch.gather(scores, 1, true_ants)\n top_true, _ = true_ant_score.max(dim=1)\n tmp_loss = scores.add(1).add(\n top_true.unsqueeze(1).neg()\n ) # 1 + scores - top_true\n if weights is not None:\n tmp_loss = tmp_loss.mul(weights)\n tmp_loss = tmp_loss.mul(costs)\n loss, _ = tmp_loss.max(dim=1)\n out_score = torch.sum(loss)\n return 
out_score / n", "def twoSum(self, nums: List[int], target: int) -> List[int]:\n d = {}\n for i, n in enumerate(nums):\n d[n]=i\n \n for i, n in enumerate(nums):\n m = target - n\n if m in d and d[m] != i:\n return [i,d[m]]\n return []", "def match_chunk_permuted(src, target, indices, match_bounds=False):\n\n ds = src.datashape.copy()\n ds.dim_low = list(ds.dim_low)\n ds.dim_high = list(ds.dim_high)\n ds_target = target.datashape.copy()\n ds_target.dim_low = list(ds_target.dim_low)\n ds_target.dim_high = list(ds_target.dim_high)\n\n hi1 = ds.dim_high\n hi2 = ds_target.dim_high\n\n # lookup array dounds if schema is unbound\n if match_bounds:\n if any(l is None for l in hi1):\n tops = src.unpack('_').max().toarray()\n hi1 = [int(tops['%s_max' % l][0]) for l in src.dim_names]\n if any(l is None for l in hi2):\n tops = target.unpack('_').max().toarray()\n hi2 = [int(tops['%s_max' % l][0]) for l in target.dim_names]\n\n for i, j in indices:\n if not isinstance(i, int):\n i = target.dim_names.index(i)\n if not isinstance(j, int):\n j = src.dim_names.index(j)\n ds.chunk_size[j] = target.datashape.chunk_size[i]\n ds.chunk_overlap[j] = target.datashape.chunk_overlap[i]\n if match_bounds:\n l = min(ds.dim_low[j], ds_target.dim_low[i])\n h = max(hi1[j], hi2[i])\n\n ds.dim_low[j] = l\n ds.dim_high[j] = h\n ds_target.dim_low[i] = l\n ds_target.dim_high[i] = h\n\n if ds.schema != src.datashape.schema:\n src = src.redimension(ds.schema)\n if ds_target.schema != target.datashape.schema:\n target = target.redimension(ds_target.schema)\n\n return src, target", "def twoSum(self, nums, target):\n\n seen = {}\n for position, num in enumerate(nums):\n remaining = target - num\n if remaining in seen:\n return [seen[remaining], position]\n seen[num] = position\n return []", "def solutionByOthers(self, nums, target):\n nums.sort()\n results = []\n\n self._findNSum( nums, target, 4, [], results )\n return results", "def _target(self, data):\n relative_values = abs(data - data.mean())\n index = relative_values.idxmax()\n value = relative_values[index]\n return index, value", "def match_nodes(source_node, target_node):\n\n node_position = cmds.xform(source_node, q=True, ws=True, t=True)\n node_rotation = cmds.xform(source_node, q=True, ws=True, ro=True)\n cmds.xform(target_node, ws=True, t=node_position)\n cmds.xform(target_node, ws=True, ro=node_rotation)", "def find_mutual_nn(self):\n best_match_src = self.scores.argmax(1) # Best match for each source word\n best_match_trg = self.scores.argmax(0) # Best match for each source word\n\n # ONELIENER\n # paired_idx = [(i,best_match_src[i]) for i in range(self.ns) if best_match_trg[best_match_src[i]] == i]\n # paired_words = [(self.src_words[i],self.trg_words[j]) for (i,j) in paired_idx]\n paired = []\n for i in range(self.ns):\n m = best_match_src[i]\n if best_match_trg[m] == i:\n paired.append((i,m))\n\n paired_toks = []\n if self.src_words and self.trg_words:\n paired_toks = [(self.src_words[i],self.trg_words[j]) for (i,j) in paired]\n else:\n paired_toks = paired\n return paired_toks", "def get_matches(self, first, second):\n matches = self._match_table.dropna(0)[\n [first.position.id, second.position.id]].astype(int).values\n return matches", "def match_scatter_curves(target_data, source_data):\n\n # Create list of calculated I values matched to the nearest experimental q\n # Remember that the arrays in python start at 0, those in Fortran at 1\n last_source = len(source_data)\n last_target = len(target_data)\n\n # Initialize array to hold the calculated I values 
matched to\n # experimental Q values\n matched_I = np.zeros(last_target, dtype=float)\n\n # Use the old fortran routine to match the data sets by q value\n # matched_no is the number of datapoints which contain matched data\n matched_no = sjp_util.qrange_match(target_data[:, 0], source_data[:, 0],\n source_data[:, 1], last_target,\n last_source, matched_I)\n\n matched_I.resize(matched_no)\n\n return matched_I", "def paired_points_matching(source, target):\n assert source.shape == target.shape\n T = np.eye(4)\n R = np.eye(3)\n t = np.zeros((1, 3))\n\n m = source.shape[1]\n\n centroid_A = np.mean(source, axis=0)\n centroid_B = np.mean(target, axis=0)\n AA = source - np.mean(source, axis=0)\n BB = target - np.mean(target, axis=0)\n\n H = np.dot(AA.T, BB)\n U, S, Vt = np.linalg.svd(H)\n R = np.dot(Vt.T, U.T)\n\n if np.linalg.det(R) < 0:\n Vt[m-1,:] *= -1\n R = np.dot(Vt.T, U.T)\n\n t = centroid_B.T - np.dot(R,centroid_A.T)\n\n T = np.identity(m+1)\n T[:m, :m] = R\n T[:m, m] = t\n\n return T, R, t", "def __twoSum(self, numbers, target):\n dic = {}\n for i, value in enumerate(numbers):\n complement = target - value\n if complement in dic:\n return [dic[complement], i]\n else:\n # index the new value\n dic[value] = i", "def _scan_targets(self, indices_to_nodes, node_property, source_index,\n factor_aggregator, compute_statistics,\n total_factor_instances,\n generated_edges, reverse_edges=False, limit=None,\n verbose=False):\n edge_list = []\n for target_index in range(source_index + 1, len(indices_to_nodes)):\n s = indices_to_nodes[source_index]\n t = indices_to_nodes[target_index]\n\n if node_property is not None:\n s_factors, t_factors = self._get_node_factors(\n s, t, node_property, factor_aggregator)\n else:\n if factor_aggregator is None:\n factor_aggregator = aggregate_index\n s_factors, t_factors = self._get_edge_factors(\n s, t, factor_aggregator, reverse_edges)\n\n common_factors = safe_intersection(\n s_factors, t_factors)\n\n if len(common_factors) > 0:\n edge = {\n \"@source_id\": s,\n \"@target_id\": t,\n \"common_factors\": common_factors\n }\n\n for stat in compute_statistics:\n edge[stat] = COOCCURRENCE_STATISTICS[stat](\n self.pgframe, s, t,\n node_property,\n common_factors,\n total_factor_instances,\n factor_aggregator,\n reverse_edges)\n\n edge_list.append(edge)\n\n if limit:\n if len(generated_edges) + len(edge_list) == limit:\n if verbose:\n print(\"Reached the edge limit ({})\".format(limit))\n return edge_list\n\n return edge_list", "def _bin_hits_to_unit_indices(rows, cols, target_breaks, source_breaks,\n su_start):\n # keep track of mapping between matrix row index and target unit index\n # in ``target_units``\n row2t_unit_ind = np.array([\n u_ind\n for u_ind in range(len(target_breaks) - 1)\n for _ in range(target_breaks[u_ind+1] - target_breaks[u_ind])])\n # keep track of mapping between matrix column index and source unit index\n # in ``source_units``\n col2s_unit_ind = np.array([\n u_ind\n for u_ind in range(len(source_breaks) - 1)\n for _ in range(source_breaks[u_ind+1] - source_breaks[u_ind])])\n tmp = {}\n hits2positions = {}\n t_inds = row2t_unit_ind[rows]\n s_inds = col2s_unit_ind[cols]\n t_poses = rows - target_breaks[t_inds]\n s_poses = cols - source_breaks[s_inds]\n # although s_inds needs to index the source_breaks by the ordering of this\n # batch of source_units, s_inds needs to account for source_unit indices as\n # referenced from outside of this batch\n s_inds += su_start\n for t_ind, s_ind, t_pos, s_pos in zip(t_inds, s_inds, t_poses, s_poses):\n key = 
(t_ind, s_ind)\n if key not in tmp:\n tmp[key] = (t_pos, s_pos)\n elif key not in hits2positions:\n hits2positions[key] = [tmp[key], (t_pos, s_pos)]\n else:\n hits2positions[key].append((t_pos, s_pos))\n hits2positions = {k: np.array(v) for k, v in hits2positions.items()}\n return hits2positions", "def find_nearest(ref_array,target_array):\n ref_tree = scipy.spatial.cKDTree(ref_array)\n dist, indices = ref_tree.query(target_array, k=1)\n return indices", "def two_sum(self, nums: List[int], target: int) -> List[int]:\n found = {}\n\n for idx, value in enumerate(nums):\n rest = target - nums[idx]\n if rest in found:\n return [idx, found[rest]]\n else:\n found[value] = idx", "def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = one_way_skar(d, source, target, others)\n return uniques", "def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = one_way_skar(d, target, source, others)\n return uniques", "def get_orientation_from_to(self, source, destination):\r\n if destination not in source.get_neighbors():\r\n return None\r\n return [x for x in source.neighbors.keys() if source.neighbors[x] == destination][0]", "def twoSum(self, nums: List[int], target: int) -> List[int]:\n \n # Given nums=[2,7,11,15],target=9\n \n d={}\n for i in range(len(nums)):\n x = target-nums[i]\n if x in d:\n return [d[x],i]\n\n d[nums[i]]=i\n\n return []", "def _get_matrix(self, source_points, destination_points):\n return [\n [self.measure_between_two_points(point_a, point_b) for point_b in destination_points]\n for point_a in source_points\n ]", "def adapt_target(self, target):\n\n target = target.view(-1)\n new_target = [target.clone()]\n target_idxs = []\n\n for i in range(len(self.cutoff) - 1):\n mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))\n new_target[0][mask] = self.cutoff[0] + i - self.buggy_offset\n\n if mask.any():\n target_idxs.append(mask.nonzero().squeeze(1))\n new_target.append(target[mask].add(-self.cutoff[i]))\n else:\n target_idxs.append(None)\n new_target.append(None)\n\n return new_target, target_idxs", "def compute_distances(src):\n rr = np.vstack((src[0]['rr'][src[0]['inuse'].astype(np.bool)],\n src[1]['rr'][src[1]['inuse'].astype(np.bool)]))\n return distance.squareform(distance.pdist(rr))", "def explore(self, nums, left, right, target):\n diff = sys.maxsize\n\n while left < right:\n cur_sum = nums[left] + nums[right]\n if cur_sum == target:\n return 0\n \n if abs(target - cur_sum) < abs(diff):\n diff = target - cur_sum\n if cur_sum < target:\n left += 1\n else:\n right -= 1\n return diff", "def rank_transform(self):\n sorted_targets = sorted(self.genomes, key=lambda item: item.fitness)\n for index, target in enumerate(sorted_targets):\n target.fitness = index/len(sorted_targets) - 0.5", "def get_sources(self, target: Tuple[str, str], relation: str = None) -> List[Node]:\n return self.get_common_sources([target], relation)", "def greedy_nearest_neighbor(sources, targets):\n source_output = np.empty_like(sources)\n target_output = np.empty_like(targets)\n\n N = len(sources)\n distance_sq = distance.cdist(sources, targets, 'sqeuclidean')\n for i in range(N):\n min_idx = np.argmin(distance_sq)\n s,t = np.unravel_index(min_idx, distance_sq.shape)\n\n source_output[i,:] = sources[s,:]\n 
target_output[i,:] = targets[t,:]\n\n # Set these to inf to prevent them from being the minimum\n distance_sq[s,:] = np.inf\n distance_sq[:,t] = np.inf\n\n return source_output, target_output", "def get_targets(\n self, gt_match_indices: List[Tensor],\n key_sampling_results: List[SamplingResult],\n ref_sampling_results: List[SamplingResult]) -> Tuple[List, List]:\n\n track_targets = []\n track_weights = []\n for _gt_match_indices, key_res, ref_res in zip(gt_match_indices,\n key_sampling_results,\n ref_sampling_results):\n targets = _gt_match_indices.new_zeros(\n (key_res.pos_bboxes.size(0), ref_res.bboxes.size(0)),\n dtype=torch.int)\n _match_indices = _gt_match_indices[key_res.pos_assigned_gt_inds]\n pos2pos = (_match_indices.view(\n -1, 1) == ref_res.pos_assigned_gt_inds.view(1, -1)).int()\n targets[:, :pos2pos.size(1)] = pos2pos\n weights = (targets.sum(dim=1) > 0).float()\n track_targets.append(targets)\n track_weights.append(weights)\n return track_targets, track_weights", "def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def get_src_posit_obs_margs(margs, msources, candids):\n psources = margs[:,0,1][candids]\n idx=candids[psources.argsort()[::-1]]\n #print(idx)\n pos = np.mean([np.argmax(idx == s) for s in msources])\n return pos/len(candids)", "def twoSum(self, nums: List[int], target: int) -> List[int]:\n diffRec = {}\n for i, v in enumerate(nums):\n if v in diffRec:\n return [diffRec[v], i]\n else:\n diffRec[target - v] = i\n return -1", "def getDistsSourceToNodes(self):\r\n self.run()\r\n return self.dists_so_far", "def extract_list(self, target_port_rank):\n if target_port_rank <= 0:\n raise ValueError(\n 'Invalid input {}. No ports can be selected'.format(target_port_rank)\n )\n\n service_port_list = sorted(self.__port_map.values())\n port_list = list(ele.port_num for ele in service_port_list)\n return sorted(port_list[:target_port_rank])", "def gen_hits2positions(\n target_units, source_units, stoplist_set, features_size):\n target_feature_matrix, target_breaks = _construct_unit_feature_matrix(\n target_units, stoplist_set, features_size)\n stepsize = 500\n for su_start in range(0, len(source_units), stepsize):\n feature_source_matrix, source_breaks = _construct_feature_unit_matrix(\n source_units[su_start:su_start+stepsize],\n stoplist_set,\n features_size)\n # for every position of each target unit, this matrix multiplication\n # picks up which source unit positions shared at least one common\n # feature\n match_matrix = target_feature_matrix.dot(feature_source_matrix)\n # this data structure keeps track of which target unit position matched\n # with which source unit position\n coo = match_matrix.tocoo()\n yield _bin_hits_to_unit_indices(\n coo.row, coo.col, target_breaks, source_breaks, su_start)", "def target_ids(self):\n\n return self._target_ids", "def get_target_per_score(self):\n pass", "def get_for_targets(self, targets):\n products = OrderedSet()\n for target in targets:\n products.update(self._products_by_target[target])\n return products", "def getTargetPositions(rg):\n targetPositions = OrderedDict()\n for r in rg.robotDict.values():\n x, y, z = r.metFiberPos\n targetPositions[r.id] = [x, y]\n return targetPositions", "def find_value(lists, target):\n loc = []\n l = len(lists)\n for i in range(0, l, 1):\n if(lists[i] == target):\n loc.append(i)\n else:\n continue\n return loc", "def common_nbrs(self, u, v):\n u_adj = self.nx_graph.neighbors(u)\n v_adj = self.nx_graph.neighbors(v)\n nbrs = []\n for u in u_adj:\n if u in v_adj:\n 
nbrs.append(u)\n\n return nbrs, u_adj, v_adj", "def read_result(result_file, source2target):\n result_index = defaultdict(lambda: [])\n \n with open(result_file) as tsvfile:\n tsvreader = csv.reader(tsvfile, delimiter='\\t')\n for row in tsvreader: \n source = row[0]\n target = row[1]\n score = float(row[2])\n\n if source in source2target:\n result_index[source].append((target, score))\n else:\n logging.warning('Source term \"%s\" not found in the gold standard' % source)\n\n return result_index", "def get_common_targets(\n self,\n sources: List[Tuple[str, str]],\n relation: str,\n ) -> List[Node]:\n rel_str = \":%s\" % relation if relation else \"\"\n parts = [\n \"({id: '%s'})-[%s]->(t)\" % (norm_id(*source), rel_str) for source in sources\n ]\n query = \"\"\"\n MATCH %s\n RETURN DISTINCT t\n \"\"\" % \",\".join(\n parts\n )\n nodes = [self.neo4j_to_node(res[0]) for res in self.query_tx(query)]\n return nodes", "def calculate_single_cycle_gan_metrics(source, target):\n metric_results = MetricsContainer(len(source))\n for i, x in enumerate(source):\n scores = torch.mean(torch.abs(x.view(1, -1) - target), dim=1)\n _, indices = torch.sort(scores, descending=False)\n indices = indices.cpu().numpy()\n rank = np.nonzero(indices == i)[0][0] + 1\n metric_results.update(rank)\n return metric_results.get_results()", "def traverse(self, source):\r\n key = self.d.keys()\r\n #check for source in graph\r\n if source not in key:\r\n raise KeyError(str(source) + \" is not in graph!\")\r\n #initialize V, Q and M\r\n V = []\r\n Q = deque()\r\n Q.append(source)\r\n M = set(source)\r\n #while Q is not empty\r\n while Q:\r\n #take first element of queue\r\n current = Q.popleft()\r\n #add it to V\r\n V.append(current)\r\n neighbors = self.d[current]\r\n #for each value associated with this key\r\n for n in neighbors:\r\n #if it isn't in M, add it to M and end of Q\r\n if n not in M:\r\n Q.append(n)\r\n M.add(n)\r\n return V", "def find_pairs(candidate_array, TARGET_VALUE=10):\r\n \r\n from collections import defaultdict\r\n positions = defaultdict(list)\r\n \r\n #Read everything into a dictionary, storing the original array position \r\n for i in range(len(candidate_array)):\r\n positions[candidate_array[i]].append(i)\r\n\r\n #Read list comparing value to TARGET_VALUE \r\n for i in range(len(candidate_array)):\r\n pair_value = TARGET_VALUE - candidate_array[i]\r\n if positions[pair_value]:\r\n for p in positions[pair_value]:\r\n if p > i:\r\n #print \"%d,%d\" % (candidate_array[i], pair_value)\r\n None", "def _get_target_remotes(self, indices):\n indices = self._get_indices(indices)\n return [self.remotes[i] for i in indices]", "def targets(self) -> List[List[float]]:\n return [d.targets for d in self.data]", "def firstNeighbor(v):\n for u in ranks:\n if u in matched or not self.adjacent(u, v):\n continue\n\n return u", "def searchRange(self, nums: List[int], target: int) -> List[int]:\n if not nums:\n return [-1, -1]\n n = len(nums)\n start, end = 0, n - 1\n while start <= end:\n mid = start + (end - start + 1 + 1)//2 - 1\n left = right = -1\n if nums[mid] == target:\n left = right = mid\n elif nums[start] == target:\n left = right = start\n elif nums[end] == target:\n left = right = end\n\n if 0 <= left and left < n:\n has_left = left - 1 >= 0 and nums[left-1] == target\n has_right = right + 1 < n and nums[right+1] == target\n while has_left or has_right:\n if has_left:\n left -= 1\n if has_right:\n right += 1\n has_left = left - 1 >= 0 and nums[left-1] == target\n has_right = right + 1 < n and 
nums[right+1] == target\n\n return [left, right]\n\n elif nums[mid] > target:\n # [0, mid - 1]\n end = mid - 1\n else:\n # [mid + 1, n]\n start = mid + 1\n\n return [-1, -1]", "def get_common_sources(\n self, targets: List[Tuple[str, str]], relation: str\n ) -> List[Node]:\n rel_str = \":%s\" % relation if relation else \"\"\n parts = [\n \"(s)-[%s]->({id: '%s'})\" % (rel_str, norm_id(*target)) for target in targets\n ]\n query = \"\"\"\n MATCH %s\n RETURN DISTINCT s\n \"\"\" % \",\".join(\n parts\n )\n nodes = [self.neo4j_to_node(res[0]) for res in self.query_tx(query)]\n return nodes", "def get_sources(self, target):\n return sorted(list({t[0].split('.')[0]\n for t in self.mapping.items()\n if target in [c.split('.')[0]\n for c in type(t[1]) is dict and t[1].keys() or ()]}))", "def search_highest_k_neighbor(self, k):\n max_score = 0\n target_node = None\n from_idx = None\n to_idx = None\n for i in range(k):\n node = self.graph.nodes[random.randrange(len(self.graph.nodes))]\n cluster_idx = self.search_cluster_by_node(node)\n if len(self.result[cluster_idx].get_nodes()) == 1:\n end_i = len(self.result)\n else:\n end_i = len(self.result) + 1\n\n random_cluster_idx = random.randrange(end_i)\n if random_cluster_idx != cluster_idx:\n tried_score = self.try_replace_node(node, cluster_idx, random_cluster_idx)\n if max_score < tried_score:\n max_score = tried_score\n target_node = node\n from_idx = cluster_idx\n to_idx = random_cluster_idx\n\n return max_score, target_node, from_idx, to_idx", "def set_ranking_users():\n data = select_data_source()\n order_id = data['id']\n from_user = data['from']\n to_user = data['to']\n ranking = float(data['ranking']) # TODO.\n \n if session._id != from_user and session._id != to_user : return permission_denied_return\n \n db = database.getdb()\n \n if not float.is_integer(ranking) or ranking <= 0 or 10 < ranking :\n return ranking_invalid_value_return\n \n ### Check if order exists.\n \n cmd = 'select * from orders where id==\"{0}\"'.format(order_id)\n order_info = db.execute(cmd).fetchall()\n if len(order_info) == 0 :\n return ranking_not_exist_return\n \n ### Check if user is valid.\n cmd = 'select owner, customer from orders where id==\"{0}\"'.format(order_id)\n owner, customer = db.execute(cmd).fetchall()[0]\n \n ### Check and setup ranked info in orders.\n \n if from_user == owner and to_user == customer :\n cmd = 'select owner_ranked from orders where id==\"{0}\"'.format(order_id)\n is_ranked = db.execute(cmd).fetchall()[0][0]\n if is_ranked != 0 :\n return ranking_already_ranked_return\n cmd = 'update orders set owner_ranked=1'\n db.execute(cmd)\n db.commit()\n elif from_user == customer and to_user == owner :\n cmd = 'select customer_ranked from orders where id==\"{0}\"'.format(order_id)\n is_ranked = db.execute(cmd).fetchall()[0][0]\n if is_ranked != 0 :\n return ranking_already_ranked_return\n cmd = 'update orders set customer_ranked=1'\n db.execute(cmd)\n db.commit()\n else :\n return ranking_not_relative_return\n \n ### Update rank to to_user.\n \n cmd = 'select rank, rank_time from users where mail==\"{0}\"'.format(to_user)\n rank, rank_time = db.execute(cmd).fetchall()[0]\n \n rank = (rank * rank_time + ranking) / (rank_time + 1)\n rank_time += 1\n \n cmd = 'update users set rank={0}, rank_time={1} where mail=\"{2}\"'.format(rank, rank_time, to_user)\n db.execute(cmd)\n db.commit()", "def linearSearch(values: list, target: int) -> int:\n for i in range(len(values)):\n if target == values[i]:\n return i\n \n return -1", "def 
_match_users_with_karma(rankings, user_key, karma_key):\n if not rankings:\n return []\n\n giver_ids = [r[user_key] for r in rankings]\n ids_to_users = User.objects.select_related(\n 'userprofile').in_bulk(giver_ids)\n return [(ids_to_users[r[user_key]], r[karma_key]) \\\n for r in rankings]", "def transferRadii(points_from, radii_from, points_to, radii_to):\n \n # assume points are sorted lists of indices\n match = ld.match(points_from, points_to);\n \n ids = ld.where(match >= 0);\n match = ld.take(match, ids);\n radii = ld.take(radii_from, ids);\n \n ld.setArray(radii_to, match, radii);\n \n return radii_to;", "def get_relevant_images_rank(img_lst, img_map, indices, distances, k,operation=\"union\"):\n # k = k \n set_lst = []\n helper = []\n helper2 = []\n for img in img_lst:\n ind_dist = get_similar_imgs_rank(img, img_map, indices, distances, k=k)\n helper.append(ind_dist[0])\n set_lst.append(ind_dist[1])\n helper2.append(set(ind_dist[0]))\n\n # distances = distances[:k]\n helper = sum(helper, [])\n set_lst = sum(set_lst, [])\n\n\n df = pd.DataFrame({\n \"indices\": helper,\n \"distances\": set_lst\n })\n\n if operation == \"union\":\n # imgs = list(set.union(*df[\"indices\"]))\n # print(len(df))\n df = df.drop_duplicates(subset=\"indices\")\n # print(len(df))\n\n df = df.sort_values(\"distances\")\n print(df)\n return df[\"indices\"].values\n if operation == \"intersection\":\n # inter = list(set.intersection(*helper2))\n # print(inter)\n df = df[df[\"indices\"].isin(list(set.intersection(*helper2)))]\n df = df.drop_duplicates(subset=\"indices\")\n df = df.sort_values(\"distances\")\n # print(df)\n return df[\"indices\"].values", "def __call__(self, target_labels: List[Tensor], fg_probs: Tensor):\n anchors_per_image = [anchors_in_image.shape[0] for anchors_in_image in target_labels]\n fg_probs = fg_probs.split(anchors_per_image, 0)\n\n pos_idx = []\n neg_idx = []\n for img_labels, img_fg_probs in zip(target_labels, fg_probs):\n positive = torch.where(img_labels >= 1)[0]\n negative = torch.where(img_labels == 0)[0]\n\n num_pos = self.get_num_pos(positive)\n pos_idx_per_image_mask = self.select_positives(\n positive, num_pos, img_labels, img_fg_probs)\n pos_idx.append(pos_idx_per_image_mask)\n\n num_neg = self.get_num_neg(negative, num_pos)\n neg_idx_per_image_mask = self.select_negatives(\n negative, num_neg, img_labels, img_fg_probs)\n neg_idx.append(neg_idx_per_image_mask)\n\n return pos_idx, neg_idx", "def _find(self, candidates, target, lb, rb):\n # we'v made sure there's no duplicate in candidates\n li, ri = lb, rb\n while li < ri:\n mi = (li + ri) // 2\n if candidates[mi] < target:\n li = mi + 1\n elif candidates[mi] > target:\n ri = mi - 1\n else:\n return mi\n\n if li == ri:\n if candidates[li] <= target:\n return li\n else:\n return li - 1\n\n if ri < lb:\n return ri\n\n if li == rb:\n return rb - 1\n\n # now it's like c[ri] < target < c[li]\n # actually these 3 cases are all ri...\n return ri", "def score(mat: Tensor, target: Tensor, k: int) -> tuple:\n # number of users with similarities larger than the matched users\n rank = (mat >= target).sum(1)\n # rank = min(rank, k + 1)\n rank = rank.min(torch.tensor(k + 1).cuda())\n tmp = (k + 1 - rank).float()\n # hit_precision@k\n hit_score = (tmp / k).mean()\n # precision@k\n coverage = (tmp > 0).float().mean()\n return coverage, hit_score", "def rank_potential_items(self, target_user_id, top_k_users):\n items_rank = {}\n target_user = self.users[target_user_id]\n for user_id in top_k_users:\n sim_user = self.users[user_id]\n sim = 
self.sim_matrix[target_user_id][user_id]\n for item_id, item_time in sim_user.covered_items.items():\n if self.ensure_new and (item_id in target_user.covered_items):\n continue # skip item that already been bought\n if self.timestamp:\n # note that time context model cannot be evaluated\n # properly using offline data, this is just a demon\n # user's interest for this history item\n t_now = 1146454548\n time_elapse = Model.time_elapse(item_time, t_now)\n score = time_elapse*sim\n else:\n score = sim\n try:\n items_rank[item_id] += score\n except KeyError:\n items_rank[item_id] = score\n # assert len(items_rank) >= self.n\n return items_rank", "def combinationSum2(self, candidates, target):\n result = list()\n path = list()\n candidates.sort()\n self.dfs(candidates, target, path, result)\n return result", "def retrieval_reciprocal_rank(preds: Tensor, target: Tensor) ->Tensor:\n preds, target = _check_retrieval_functional_inputs(preds, target)\n if not target.sum():\n return tensor(0.0, device=preds.device)\n target = target[torch.argsort(preds, dim=-1, descending=True)]\n position = torch.nonzero(target).view(-1)\n res = 1.0 / (position[0] + 1.0)\n return res", "def find_nearest(numbers, target):\n numbers = np.asarray(numbers)\n idx = (np.abs(numbers - target)).argmin()\n return numbers[idx]", "def node_targets(self, node):\r\n node = self.coalesce_node(node)\r\n nodes =[conn[1] for conn in self.connections if conn[0] == node]\r\n return nodes", "def correct(output, target, topk=(1,)):\n maxk = max(topk)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).sum(0, keepdim=True)\n res.append(correct_k)\n return res", "def correct(output, target, topk=(1,)):\n maxk = max(topk)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).sum(0, keepdim=True)\n res.append(correct_k)\n return res", "def get_pulling_indices(self, weight):\n pass", "def twoSum(self, nums: List[int], target: int) -> List[int]:\n dic = {target-n : i for i, n in enumerate(nums)}\n return next(([i, dic[n]] for i, n in enumerate(nums) if n in dic and i != dic[n]), [0, 0])", "def findDist(digraph, src, dest):\n for i in digraph.edges[src]:\n if i[0]==str(dest):\n result=i[1][0]\n return result", "def twoSum(self, nums, target):\n for index_i, i in enumerate(nums):\n for index_j, j in enumerate(nums[index_i+1:]):\n if j + i == target:\n return [index_i + 1, index_j + index_i + 2]", "def keys(targets):", "def get_scores(self, graph, current, destinations):\n scores = []\n for node in destinations:\n edge = graph.edges[current, node]\n score = self.score_edge(edge)\n scores.append(score)\n return scores", "def get_target_relations(\n self,\n source: Tuple[str, str],\n relation: Optional[str] = None,\n ) -> List[Relation]:\n return self.get_relations(source=source, target=None, relation=relation)", "def rh_get_match(x, y, k, d=7, hash_size=\"automatic\"):\n n = len(x)\n m = len(y)\n\n if hash_size == \"automatic\":\n q = n * m\n else:\n q = hash_size\n\n output = list()\n\n for i in range(n - k + 1):\n matches = rabin_karp_matcher(\n target=y,\n potential=x[i:i + k],\n d=d,\n q=q\n )\n\n for match in matches:\n output.append((i, match))\n\n return output", "def get_targets(self, rois: Tensor, rel_roi_points: Tensor,\n sampling_results: List[SamplingResult],\n batch_gt_instances: 
InstanceList,\n cfg: ConfigType) -> Tensor:\n\n num_imgs = len(sampling_results)\n rois_list = []\n rel_roi_points_list = []\n for batch_ind in range(num_imgs):\n inds = (rois[:, 0] == batch_ind)\n rois_list.append(rois[inds])\n rel_roi_points_list.append(rel_roi_points[inds])\n pos_assigned_gt_inds_list = [\n res.pos_assigned_gt_inds for res in sampling_results\n ]\n cfg_list = [cfg for _ in range(num_imgs)]\n\n point_targets = map(self._get_targets_single, rois_list,\n rel_roi_points_list, pos_assigned_gt_inds_list,\n batch_gt_instances, cfg_list)\n point_targets = list(point_targets)\n\n if len(point_targets) > 0:\n point_targets = torch.cat(point_targets)\n\n return point_targets", "def get_targets(self, df):\n return df.iloc[:, self.target_col]", "def _apply_rank(U, S, VT, r, verbose=False):\n if r is None:\n r = len(S)\n S_r = S[:r]\n U_r = U[:, :r]\n VT_r = VT[:r]\n if verbose:\n print(\"Rank:\", r, \"SVD shape:\", U_r.shape, S_r.shape, VT_r.shape)\n return U_r, S_r, VT_r", "def get_mrr(indices, targets, batch_wise=False):\n targets = targets.view(-1, 1).expand_as(indices)\n # ranks of the targets, if it appears in your indices\n hits = (targets == indices).nonzero()\n\n if len(hits) == 0:\n if batch_wise:\n return torch.zeros(targets.shape[0], 1).cuda()\n else:\n return 0\n\n ranks = hits[:, -1] + 1\n ranks = ranks.float()\n if batch_wise:\n import pdb\n # pdb.set_trace()\n buffer = torch.zeros(targets.shape[0]).cuda()\n if len(hits) > 0:\n buffer[hits[:, 0]] = torch.reciprocal(ranks)\n buffer = buffer.view(-1, 1)\n return buffer\n rranks = torch.reciprocal(ranks) # reciprocal ranks\n\n mrr = torch.sum(rranks) / targets.size(0) # / targets.size(0)\n\n return mrr.item()", "def _basis_search(equiv_lib, source_basis, target_basis):\n\n logger.debug(\"Begining basis search from %s to %s.\", source_basis, target_basis)\n\n source_basis = {\n (gate_name, gate_num_qubits)\n for gate_name, gate_num_qubits in source_basis\n if gate_name not in target_basis\n }\n\n # if source basis is empty, no work to be done.\n if not source_basis:\n return []\n\n # This is only neccessary since gates in target basis are currently reported by\n # their names and we need to have in addition the number of qubits they act on.\n target_basis_keys = [key for key in equiv_lib.keys() if key.name in target_basis]\n\n graph = equiv_lib.graph\n vis = BasisSearchVisitor(graph, source_basis, target_basis_keys)\n\n # we add a dummy node and connect it with gates in the target basis.\n # we'll start the search from this dummy node.\n dummy = graph.add_node(NodeData(key=\"key\", equivs=[(\"dummy starting node\", 0)]))\n\n try:\n graph.add_edges_from_no_data(\n [(dummy, equiv_lib.node_index(key)) for key in target_basis_keys]\n )\n rtn = None\n try:\n rustworkx.digraph_dijkstra_search(graph, [dummy], vis.edge_cost, vis)\n except StopIfBasisRewritable:\n rtn = vis.basis_transforms\n\n logger.debug(\"Transformation path:\")\n for gate_name, gate_num_qubits, params, equiv in rtn:\n logger.debug(\"%s/%s => %s\\n%s\", gate_name, gate_num_qubits, params, equiv)\n finally:\n # Remove dummy node in order to return graph to original state\n graph.remove_node(dummy)\n\n return rtn" ]
[ "0.6195965", "0.61417454", "0.57947445", "0.554299", "0.5502083", "0.53975844", "0.533245", "0.528168", "0.5239981", "0.5233992", "0.5226889", "0.5212047", "0.5168843", "0.5151811", "0.5149276", "0.5147061", "0.5105847", "0.5105718", "0.50940347", "0.5074896", "0.505985", "0.505242", "0.5049568", "0.50463647", "0.5042757", "0.5032336", "0.50239444", "0.5018989", "0.5016964", "0.50137633", "0.49846303", "0.49769932", "0.49744597", "0.49734095", "0.49709603", "0.49678975", "0.49653363", "0.4951718", "0.49462366", "0.494504", "0.49432027", "0.49426746", "0.49413386", "0.49370837", "0.49359143", "0.4925284", "0.4911771", "0.4890082", "0.48866525", "0.48833397", "0.4882391", "0.48633307", "0.48536286", "0.4846755", "0.4842209", "0.4839562", "0.48361522", "0.4825764", "0.48095286", "0.47977617", "0.47972614", "0.47953257", "0.47930095", "0.47906268", "0.47851205", "0.47764382", "0.47662646", "0.47633266", "0.47616377", "0.47542068", "0.4748385", "0.4732604", "0.4731647", "0.473032", "0.4729081", "0.47290674", "0.4715494", "0.47139597", "0.47137827", "0.47131628", "0.4709968", "0.4703933", "0.47009635", "0.46899518", "0.46864006", "0.46840188", "0.46840188", "0.46810654", "0.4679913", "0.46795997", "0.46778613", "0.46776527", "0.4675211", "0.46726567", "0.46725258", "0.4671594", "0.4655911", "0.4651747", "0.4651571", "0.4647894" ]
0.64136374
0
Create a MPI subarray mask to be used in send/recv operations between some topologies.
def create_subarray(sl_dict, data_shape): from hysop.constants import HYSOP_MPI_REAL, ORDERMPI subtypes = {} dim = len(data_shape) for rk in sl_dict.keys(): subvshape = tuple((sl_dict[rk][i].stop - sl_dict[rk][i].start for i in xrange(dim))) substart = tuple((sl_dict[rk][i].start for i in xrange(dim))) subtypes[rk] = \ HYSOP_MPI_REAL.Create_subarray(data_shape, subvshape, substart, order=ORDERMPI) subtypes[rk].Commit() return subtypes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subgraph_mask(self, size):\n init_matrix = np.random.randn(size,size)\n Tcs = csgraph.minimum_spanning_tree(init_matrix)\n mask_matrix = Tcs.toarray()\n return mask_matrix", "def generate_mask(self):\n\n polymer_length = len(self.sequence)\n protein_length = len(self.particle_order) - polymer_length\n\n if self.filter_specification == 'type':\n mask = np.in1d(self.particle_order, self.monomer_id)\n elif self.filter_specification == 'id':\n if self.molecule == 'polymer':\n offset = protein_length\n else:\n offset = 0\n mask = np.array([False] * (polymer_length + protein_length))\n absolute_id = [x+offset for x in self.monomer_id]\n mask[absolute_id] = True\n else:\n raise NotImplementedError(\"Filter is unknown. Use 'type' or 'id'!\")\n\n # if molecule == 'full', nothing needs to be done\n if self.molecule == 'polymer':\n mask[:protein_length] = [False] * protein_length\n elif self.molecule == 'protein':\n mask[protein_length:] = [False] * polymer_length\n\n return mask", "def _create_mask_part(\n self,\n shape: List[int],\n overlap: List[int],\n device: torch.device,\n dtype: torch.dtype = torch.float,\n ) -> torch.Tensor:\n assert len(shape) == 4\n zeros_size, lin_size = overlap[0:2]\n ones_size = shape[3] - (zeros_size + lin_size)\n sizes = (zeros_size, lin_size, ones_size)\n mask_parts = [\n torch.zeros(sizes[0], device=device, dtype=dtype),\n torch.linspace(0, 1, sizes[1], device=device, dtype=dtype),\n torch.ones(sizes[2], device=device, dtype=dtype),\n ]\n return (\n torch.cat(mask_parts, 0)\n .repeat(shape[2], 1)\n .repeat(shape[1], 1, 1)\n .unsqueeze(0)\n )", "def __generate_mask(self):\n mask = np.concatenate([np.ones(len(self.fixed[0])),\n np.zeros(self.num_points),\n np.ones(len(self.fixed[1]))])\n return mask", "def _prepare_mask_file(mask):\n result = np.ndarray((mask.shape[0], mask.shape[1]), dtype=np.uint8)\n for i in range(mask.shape[0]):\n for j in range(mask.shape[1]):\n\n if mask[i][j] > 0:\n result[i][j] = 1\n else:\n result[i][j] = 0\n \n return result", "def mask(self):\n mask = np.zeros((self.height, self.width))\n pts = [\n np.array(anno).reshape(-1, 2).round().astype(int)\n for anno in self.segmentation\n ]\n mask = cv2.fillPoly(mask, pts, 1)\n return mask", "def select_body_parts(mask, list_of_body_parts):\n\n new_mask = np.zeros(mask.shape).astype(np.bool)\n\n for body_part in list_of_body_parts:\n idxs = body_parts[body_part]\n\n for idx in idxs:\n m_ = (mask == idx)\n new_mask = np.bitwise_or(new_mask, m_)\n\n return new_mask.astype(np.uint8)", "def mask(self):", "def get_mask(self, shape):\n h, w = shape[0:2]\n y, x = np.mgrid[:h, :w]\n points = np.transpose((x.ravel(), y.ravel()))\n\n mask = _nxutils_points_inside_poly(points, self.verts)\n #mask = nxutils.points_inside_poly(points, self.verts)\n return mask.reshape(h, w)", "def _get_mask(self, x):\n x_mask = Variable(torch.zeros(x.size(0), self.max_seq_len).byte())\n return x_mask.cuda() if self.use_cuda else x_mask", "def create_mask(shape):\n return np.zeros(shape).astype(bool)", "def _prep_mask(dataset, trial_split):\n split_to_mask = lambda x: (dataset.trial_info.split == x) if isinstance(x, str) else x\n if isinstance(trial_split, list):\n trial_mask = np.any([split_to_mask(split) for split in trial_split], axis=0)\n else:\n trial_mask = split_to_mask(trial_split)\n return trial_mask", "def mask(self, mask):\n return MaskedDistribution(self, mask)", "def _configure_auxiliary_mask(self, auxiliary_mask):\n indices = self.indices\n\n new = [\n mask[\n tuple(\n [\n (slice(None) if n == 1 else 
index)\n for n, index in zip(mask.shape, indices)\n ]\n )\n ]\n for mask in auxiliary_mask\n ]\n\n # # If the partition is to be parallelised then get rid of mask\n # # components which are all False so the mask component does\n # # not get copied to the child process\n # if not config['serial']:\n # new = [mask for mask in new if not mask.any()]\n\n self.config[\"auxiliary_mask\"] = new", "def bbox2mask(self, shape, margin, bbox_shape, times):\r\n bboxs = []\r\n for i in range(times):\r\n bbox = self.random_bbox(shape, margin, bbox_shape)\r\n bboxs.append(bbox)\r\n height = shape\r\n width = shape\r\n mask = np.zeros((height, width), np.float32)\r\n for bbox in bboxs:\r\n h = int(bbox[2] * 0.1) + np.random.randint(int(bbox[2] * 0.2 + 1))\r\n w = int(bbox[3] * 0.1) + np.random.randint(int(bbox[3] * 0.2) + 1)\r\n mask[(bbox[0] + h) : (bbox[0] + bbox[2] - h), (bbox[1] + w) : (bbox[1] + bbox[3] - w)] = 1.\r\n return mask.reshape((1, ) + mask.shape).astype(np.float32)", "def bbox2mask(self, shape, margin, bbox_shape, times):\r\n bboxs = []\r\n for i in range(times):\r\n bbox = self.random_bbox(shape, margin, bbox_shape)\r\n bboxs.append(bbox)\r\n height = shape\r\n width = shape\r\n mask = np.zeros((height, width), np.float32)\r\n for bbox in bboxs:\r\n h = int(bbox[2] * 0.1) + np.random.randint(int(bbox[2] * 0.2 + 1))\r\n w = int(bbox[3] * 0.1) + np.random.randint(int(bbox[3] * 0.2) + 1)\r\n mask[(bbox[0] + h) : (bbox[0] + bbox[2] - h), (bbox[1] + w) : (bbox[1] + bbox[3] - w)] = 1.\r\n return mask.reshape((1, ) + mask.shape).astype(np.float32)", "def create_all_mask(mask, num, stride):\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)", "def create_region_mask(latitude_array, target_shape, lat_bounds):\n\n target_ndim = len(target_shape)\n\n southern_lat, northern_lat = lat_bounds\n mask_array = numpy.where((latitude_array >= southern_lat) & (latitude_array < northern_lat), False, True)\n\n mask = uconv.broadcast_array(mask_array, [target_ndim - 2, target_ndim - 1], target_shape)\n assert mask.shape == target_shape \n\n return mask", "def compute_mask_indices(\n shape: Tuple[int, int],\n padding_mask: Optional[torch.Tensor],\n mask_prob: float,\n mask_length: int,\n mask_type: str = \"static\",\n mask_other: float = 0.0,\n min_masks: int = 0,\n no_overlap: bool = False,\n min_space: int = 0,\n) -> np.ndarray:\n\n bsz, all_sz = shape\n mask = np.full((bsz, all_sz), False)\n\n all_num_mask = int(\n # add a random number for probabilistic rounding\n mask_prob * all_sz / float(mask_length)\n + np.random.rand()\n )\n\n all_num_mask = max(min_masks, all_num_mask)\n\n mask_idcs = []\n for i in range(bsz):\n if padding_mask is not None:\n sz = all_sz - padding_mask[i].long().sum().item()\n num_mask = int(\n # add a random number for probabilistic rounding\n mask_prob * sz / float(mask_length)\n + np.random.rand()\n )\n num_mask = max(min_masks, num_mask)\n else:\n sz = all_sz\n num_mask = all_num_mask\n\n if mask_type == \"static\":\n lengths = np.full(num_mask, mask_length)\n elif mask_type == \"uniform\":\n lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)\n elif mask_type == \"normal\":\n lengths = np.random.normal(mask_length, mask_other, size=num_mask)\n lengths = [max(1, int(round(x))) for x in lengths]\n elif mask_type == \"poisson\":\n lengths = np.random.poisson(mask_length, size=num_mask)\n 
lengths = [int(round(x)) for x in lengths]\n else:\n raise Exception(\"unknown mask selection \" + mask_type)\n\n if sum(lengths) == 0:\n lengths[0] = min(mask_length, sz - 1)\n\n if no_overlap:\n mask_idc = []\n\n def arrange(s, e, length, keep_length):\n span_start = np.random.randint(s, e - length)\n mask_idc.extend(span_start + i for i in range(length))\n\n new_parts = []\n if span_start - s - min_space >= keep_length:\n new_parts.append((s, span_start - min_space + 1))\n if e - span_start - keep_length - min_space > keep_length:\n new_parts.append((span_start + length + min_space, e))\n return new_parts\n\n parts = [(0, sz)]\n min_length = min(lengths)\n for length in sorted(lengths, reverse=True):\n lens = np.fromiter(\n (e - s if e - s >= length + min_space else 0 for s, e in parts),\n np.int,\n )\n l_sum = np.sum(lens)\n if l_sum == 0:\n break\n probs = lens / np.sum(lens)\n c = np.random.choice(len(parts), p=probs)\n s, e = parts.pop(c)\n parts.extend(arrange(s, e, length, min_length))\n mask_idc = np.asarray(mask_idc)\n else:\n min_len = min(lengths)\n if sz - min_len <= num_mask:\n min_len = sz - num_mask - 1\n\n mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)\n\n mask_idc = np.asarray(\n [\n mask_idc[j] + offset\n for j in range(len(mask_idc))\n for offset in range(lengths[j])\n ]\n )\n\n mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))\n\n min_len = min([len(m) for m in mask_idcs])\n for i, mask_idc in enumerate(mask_idcs):\n if len(mask_idc) > min_len:\n mask_idc = np.random.choice(mask_idc, min_len, replace=False)\n mask[i, mask_idc] = True\n\n return mask", "def mask2whole_mask(mask, bbox, size):\n if len(mask) != len(bbox):\n raise ValueError('The length of mask and bbox should be the same')\n R = len(mask)\n H, W = size\n whole_mask = np.zeros((R, H, W), dtype=np.bool)\n\n for i, (m, bb) in enumerate(zip(mask, bbox)):\n bb = np.round(bb).astype(np.int32)\n whole_mask[i, bb[0]:bb[2], bb[1]:bb[3]] = m\n return whole_mask", "def submask(mask, region):\n mask = mask[region['blc'][0]:region['trc'][0]+1, region['blc'][1]:region['trc'][1]+1]\n return mask", "def maskArr(arrIn, segMap, maskID):\n return np.logical_and(segMap != maskID, segMap != 0)", "def _build_mask(self, xg, yg):\n\n # 1. create mask based on meshes\n points = np.vstack((xg.flatten(), yg.flatten())).T\n\n # 2. 
extract edge points using el_pos\n edge_points = self.node[np.arange(16)]\n path = Path(edge_points, closed=False)\n mask = path.contains_points(points)\n\n return mask", "def minimize_mask(bbox, mask, mini_shape):\n mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)\n for i in range(mask.shape[-1]):\n # Pick slice and cast to bool in case load_mask() returned wrong dtype\n m = mask[:, :, i].astype(bool)\n y1, x1, y2, x2 = bbox[i][:4]\n m = m[y1:y2, x1:x2]\n if m.size == 0:\n raise Exception(\"Invalid bounding box with area of zero\")\n # Resize with bilinear interpolation\n m = resize(m, mini_shape)\n mini_mask[:, :, i] = np.around(m).astype(np.bool)\n return mini_mask", "def mask_codes_op(base_array, codes_array):\r\n result = numpy.empty(base_array.shape, dtype=numpy.int8)\r\n result[:] = mask_nodata\r\n valid_mask = base_array != base_nodata\r\n result[valid_mask] = numpy.isin(\r\n base_array[valid_mask], codes_array)\r\n return result", "def as_mask(annotations: Sequence[Annotation],\n size: int,\n fs: float,\n include: bool = True,\n) -> npt.NDArray[np.bool_]:\n\n epochs = [(ann.time, ann.time + ann.duration) for ann in annotations]\n samples = np.round(np.array(epochs) * fs).astype(int)\n slices = [slice(*pts) for pts in samples]\n result = arraytools.filter1D(size, slices)\n result = result if include else ~result\n return cast(np.ndarray, result)", "def _mask(self) -> np.ndarray:\n mask = np.ones(self.limits, dtype=bool)\n for ax, shape, limit in zip(\n range(1, len(self.limits)), self.shape, self.limits[1:]\n ):\n ax_mask = np.arange(limit) < np.expand_dims(shape, 1)\n new_shape = np.ones(len(self.limits), dtype=int)\n new_shape[0], new_shape[ax] = self.limits[0], limit\n mask = mask & ax_mask.reshape(*new_shape)\n return mask", "def generate_effective_mask(self, mask_size: tuple, polygons_ignore):\n mask = np.ones(mask_size, dtype=np.uint8)\n\n for poly in polygons_ignore:\n instance = poly.astype(np.int32).reshape(1, -1, 2)\n cv2.fillPoly(mask, instance, 0)\n\n return mask", "def _initalize_mask(dataSubStack):\n # Initalize an array to store the output mask values\n outMask = np.zeros(dataSubStack.shape, dtype=bool)\n\n # Start by masking out NaNs or Infs\n NaNsOrInfs = np.logical_not(np.isfinite(dataSubStack.data))\n dataSubStack.mask = NaNsOrInfs\n\n return outMask, dataSubStack", "def _build_mask(\n self,\n position: int,\n grid_dim: int,\n rot_list: List[int],\n shape: List[int],\n ovlp: List[int],\n device: torch.device = torch.device(\"cpu\"),\n dtype: torch.dtype = torch.float,\n ) -> torch.Tensor:\n assert len(shape) == 4\n\n # Mask right / bottom side\n if position == 0:\n mask = self._create_mask_part(shape, ovlp, device, dtype).rot90(\n rot_list[0], [2, 3]\n )\n # Mask left & right or top & bottom sides\n elif position > 0 and position < grid_dim - 1:\n mask = self._create_mask_part(shape, ovlp, device, dtype).rot90(\n rot_list[0], [2, 3]\n )\n mask = mask * self._create_mask_part(shape, ovlp, device, dtype).rot90(\n rot_list[1], [2, 3]\n )\n # Mask left / top side\n else:\n mask = self._create_mask_part(shape, ovlp, device, dtype).rot90(\n rot_list[1], [2, 3]\n )\n return mask", "def get_mask(total, begin, end):\n mask = np.zeros([total]).astype(np.float32)\n mask[begin:end] = 1\n return np.array(mask, dtype=np.bool)", "def generate_mask(data, tps, length, tp_union):\n tp_map = {tp_union[i].item(): i for i in range(len(tp_union))}\n\n mask = np.zeros((data.shape[0], tp_union.shape[0]))\n e_data = torch.zeros((data.shape[0], tp_union.shape[0], 
data.shape[2]))\n e_data = e_data.to(data.device)\n r_arr = []\n\n for i in range(len(mask)):\n inds = [tp_map[tps[i][j].item()] for j in range(length[i])]\n mask[i, inds] = 1\n e_data[i, inds] = data[i, :length[i]]\n r_arr.append(np.where(mask[i] == 1)[0])\n\n return mask, e_data, r_arr", "def get_contest_mask():\n return createmaskdf(\"data/fcstrodeo_nctemplates/fcstrodeo_mask.nc\")", "def mod_mask(self):\n # Check the *_masq values\n self.__log.debug(\"Checking the *_masq arrays\")\n # Retrieve the kid boxes\n masq_names = np.unique([\"{}_masq\".format(item[1]) for item in self.list_detector])\n self.__check_attributes(masq_names, read_missing=False)\n # Check that they are all the same\n warnings.warn(\"Temporary fix to int8\")\n masqs = [getattr(self, masq).astype(np.int8) for masq in masq_names]\n\n if np.any(np.std(masqs, axis=0) != 0):\n self.__log.error(\"*_masq is varying -- Please check : {}\".format(pprint_list(masq_names, \"_masq\")))\n\n # AB private comm) main_flag should be the bitwise_or of all boxes\n # Well not exactly....\n # cast into 8 bit, is more than enough, only 3 bits used anyway...\n masq = np.bitwise_or.reduce(masqs, axis=0).astype(np.int8)\n\n # AB (#CONCERTO_DAQ January 11 13:02)\n # _flag_balayage_en_cours & _flag_blanking_synthe\n # Ainsi on aura la modulation en bit0 et 1 et le flag blanking en bit\n # AB (#CONCERTO_DAQ February 11 11:07)\n # bit 1 & 2 code the modulation as a signed integer -1 0 1 : 11 00 01 ie 3 0 1\n # bit 3 is a blanking bit, which does not exist for KISS, but should not be taken into account for CONCERTO\n\n # Thus as a temporary fix, let's clear the 3rd bit, actually a bad idea...\n # self.__log.warning(\"Temporary fix : clearing the 3rd bit of masq\")\n # masq = masq & ~(1 << 2)\n\n return masq", "def get_part_mask(densepose_map):\r\n # Group of body parts. 
Each group contains IDs of body labels in DensePose.\r\n # The 9 groups here are: background, torso, hands, feet, upper legs, lower legs,\r\n # upper arms, lower arms, head.\r\n part_groups = [[0], [1, 2], [3, 4], [5, 6], [7, 9, 8, 10], [11, 13, 12, 14],\r\n [15, 17, 16, 18], [19, 21, 20, 22], [23, 24]]\r\n n_parts = len(part_groups)\r\n\r\n densepose_map = densepose_map.numpy()\r\n need_reshape = len(densepose_map.shape) == 4\r\n if need_reshape:\r\n bo, t, h, w = densepose_map.shape\r\n densepose_map = np.reshape(densepose_map, (-1, h, w))\r\n b, h, w = densepose_map.shape\r\n part_map = (densepose_map / 2 + 0.5) * 24\r\n assert np.all(part_map >= 0) and np.all(part_map < 25)\r\n\r\n mask = np.zeros((b, n_parts, h, w)).astype(\"bool\")\r\n for i in range(n_parts):\r\n for j in part_groups[i]:\r\n # Account for numerical errors.\r\n mask[:, i] = np.logical_or(mask[:, i],\r\n np.logical_and((part_map > j - 0.1), (part_map < j + 0.1)))\r\n if need_reshape:\r\n mask = np.reshape(mask, (bo, t, -1, h, w))\r\n mask = dg.to_variable(mask.astype(\"float32\"))\r\n return mask", "def mask(self, mask):\n ds_out = self._obj\n for var in self.vars:\n ds_out[var] = ds_out[var].raster.mask(mask)\n return ds_out", "def get_sample_mask(self):", "def _local_mask(self, sparsity):\n for mask, param in self.masked_parameters:\n score = self.scores[id(param)]\n k = int((1.0 - sparsity) * score.numel())\n if not k < 1:\n threshold, _ = torch.kthvalue(torch.flatten(score), k)\n zero = torch.tensor([0.]).to(mask.device)\n one = torch.tensor([1.]).to(mask.device)\n mask.copy_(torch.where(score <= threshold, zero, one))", "def make_mask(data, pad):\n def subsequent_mask(size):\n \"\"\" helper function for creating the masks. \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n mask = (data != pad).unsqueeze(-2)\n mask = mask & Variable(\n subsequent_mask(data.size(-1)).type_as(mask.data))\n return mask", "def mask_var(self, data):\n return data.where(_make_mask(data, self.mask_bounds))", "def build_mask(mask, unused_features_positions):\n\tfinal_mask = mask.tolist()\n\n\tfor i in range(len(unused_features_positions)):\n\t\tif not unused_features_positions[i]:\n\t\t\tfinal_mask.insert(i, False)\n\n\treturn final_mask", "def QACloudMaskLocal(d2_array):\n d2_array = d2_array.astype(np.uint16)\n pattern = np.uint16(int('1010000000000000', 2))\n return np.bitwise_and(d2_array,pattern) > 0", "def mask(self):\n return np.ones((self.size, self.size))", "def make_mask(data, pad):\n\n def subsequent_mask(size):\n \"\"\" helper function for creating the masks. 
\"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n mask = (data != pad).unsqueeze(-2)\n mask = mask & Variable(\n subsequent_mask(data.size(-1)).type_as(mask.data))\n return mask", "def test_get_mask(self):\n\n spine_data_loader = SpineDataLoader(dirpath_data=self.dirpath,\n batch_size=4)\n\n for idx in range(4):\n mask = spine_data_loader.get_mask(str(idx))\n assert mask.shape == (256, 256, 1)\n assert mask.dtype == 'int64'", "def create_mask(predictions_2d, sizeX, sizeY, chip_shape):\n\n # reshape predictions_2d\n predictions_2d_res = np.array(predictions_2d)\n predictions_2d_res = predictions_2d_res.reshape(sizeX, sizeY)\n\n # create new mask of area of interest\n new_mask = np.zeros((chip_shape[1], chip_shape[2]))\n for x in range(0, chip_shape[1], 256):\n for y in range(0, chip_shape[2], 256):\n new_mask[x:x + 256, y:y + 256] = predictions_2d_res[x / 256][y / 256]\n\n return new_mask", "def __set_mask_regions(self):\n self.bottom_clip = np.int32(np.int32([[[60,0], [1179,0], [1179,650], [60,650]]]))\n self.roi_clip = np.int32(np.int32([[[640, 425], [1179,550], [979,719],\n [299,719], [100, 550], [640, 425]]]))", "def prepareMask(self, mask):\n\n # Make sure that the mask has the same\n # number of voxels as the atlas image.\n # Use nearest neighbour interpolation\n # for resampling, as it is most likely\n # that the mask is binary.\n try:\n mask, xform = resample.resample(\n mask, self.shape[:3], dtype=np.float32, order=0)\n\n except ValueError:\n raise MaskError('Mask has wrong number of dimensions')\n\n # TODO allow non-aligned mask - as long as it overlaps\n # in world coordinates, it should be allowed\n if not fslimage.Image(mask, xform=xform).sameSpace(self):\n raise MaskError('Mask is not in the same space as atlas')\n\n return mask", "def test(shape=(1000,2000)):\n mask = Mask()\n mask.addCircle(400,300,250)\n mask.subtractCircle(400,300,150)\n mask.addRectangle(350,250,1500,700)\n plt.imshow( mask.getMask(shape) )\n return mask", "def generate_mask_aligned(data, length, tp_union):\n mask = np.zeros((data.shape[0], tp_union.shape[0]))\n e_data = torch.zeros((data.shape[0], tp_union.shape[0], data.shape[2]))\n e_data = e_data.to(data.device)\n r_arr = []\n\n for i, l in enumerate(length):\n mask[i, :l] = 1\n e_data[i, :l] = data[i, :l]\n r_arr.append(np.where(mask[i] == 1)[0])\n\n return mask, e_data, r_arr", "def get_mask(self, anno, img_info) -> np.ndarray:\n m = np.zeros((img_info[\"height\"], img_info[\"width\"]), dtype=np.float32)\n\n for obj in anno:\n if obj[\"iscrowd\"]:\n rle = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n m += mask\n elif obj[\"num_keypoints\"] == 0:\n rles = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n for rle in rles:\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n\n m += mask\n\n return (m < 0.5).astype(np.float32)", "def expand_mask(bbox, mini_mask, image_shape):\n mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)\n for i in range(mask.shape[-1]):\n m = mini_mask[:, :, i]\n y1, x1, 
y2, x2 = bbox[i][:4]\n h = y2 - y1\n w = x2 - x1\n # Resize with bilinear interpolation\n m = resize(m, (h, w))\n mask[y1:y2, x1:x2, i] = np.around(m).astype(np.bool)\n return mask", "def trajectory_masker(trajectory, split_start, split_length):\r\n input_mask = np.zeros(np.shape(trajectory), dtype=bool)\r\n output_mask = np.zeros(np.shape(trajectory), dtype=bool)\r\n\r\n input_mask[:, split_start:] = 1\r\n output_mask[:, :split_start+split_length] = 1\r\n # print(np.ma.masked_array(trajectory, mask=mask_array))\r\n return np.ma.masked_array(trajectory, mask=input_mask), np.ma.masked_array(trajectory, mask=output_mask)", "def _mask_grid(self):\n xg, yg = self._build_grid()\n mask = self._build_mask(xg, yg)\n mask = mask.reshape(xg.shape)\n\n return xg, yg, mask", "def get_vertices_mask(poly, mask):\n h = mask.shape[0]\n w = mask.shape[1]\n gt_poly = np.zeros((poly.shape[0],poly.shape[1]),np.int32)\n gt_poly[:,0] = np.floor(poly[:,0]*w)\n gt_poly[:,1] = np.floor(poly[:,1]*h)\n\n mask[gt_poly[:, 1], gt_poly[:, 0]] = 1.0\n\n return mask", "def cartesian_mask(self, shape, centred=False, uniform=True):\n\t\tR = self.R\n\t\tsample_n = self.sample_n\n\t\tif uniform:\n\t\t\tN, Nx, Ny = int(np.prod(shape[:-2])), shape[-2], shape[-1]\n\t\t\tn_lines = int(Nx / R)\n\t\t\t\n\t\t\tmask = np.zeros((N, Nx, Ny))\n\t\t\tfor i in range(N):\n\t\t\t\tidx = np.arange(0,Nx,R)\n\t\t\t\tmask[i, idx, :] = 1\n\t\t\t\t\n\t\t\tif sample_n:\n\t\t\t\tmask[:, Nx//2-sample_n//2:(Nx//2+sample_n//2),:] = 1\n \n\t\telse:\n\t\t\tN, Nx, Ny = int(np.prod(shape[:-2])), shape[-2], shape[-1]\n\t\t\tpdf_x = self.normal_pdf(Nx, 0.5/(Nx/10.)**2)\n\t\t\tlmda = Nx/(2.*R)\n\t\t\tn_lines = int(Nx / R)\n \n # add uniform distribution\n\t\t\tpdf_x += lmda * 1./Nx\n \n\t\t\tif sample_n:\n\t\t\t\tpdf_x[Nx//2-sample_n//2:Nx//2+sample_n//2] = 0\n\t\t\t\tpdf_x /= np.sum(pdf_x)\n\t\t\t\tn_lines -= sample_n\n \n\t\t\tmask = np.zeros((N, Nx))\n\t\t\tfor i in range(N):\n\t\t\t\tidx = np.random.choice(Nx, n_lines, False, pdf_x)\n\t\t\t\tmask[i, idx] = 1\n \n\t\t\tif sample_n:\n\t\t\t\tmask[:, Nx//2-sample_n//2:Nx//2+sample_n//2] = 1\n \n\t\t\tsize = mask.itemsize\n\t\t\tmask = as_strided(mask, (N, Nx, Ny), (size * Nx, size, 0))\n \n\t\t\tmask = mask.reshape(shape)\n \n\t\tif not centred:\n\t\t\tmask = np.fft.ifftshift(mask, axes=(-1, -2))\n \n\t\treturn mask", "def makeMaskFromArray(array):\n if array is None: return None\n cls = globals()[\"Mask%s\" % suffixes[str(array.dtype.type)]]\n return cls(array)", "def mask(self):\n return type(self)(self.data.mask, self.bset)", "def get_overlap_mask(self):\n self.overlap_mask = np.bitwise_and(\n self.structure_mask.astype(np.bool), self.unknown_mask.astype(np.bool)\n )", "def expand_mask(bbox, mini_mask, image_shape):\n mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)\n for i in range(mask.shape[-1]):\n m = mini_mask[:, :, i]\n y1, x1, y2, x2 = bbox[i][:4]\n h = y2 - y1\n w = x2 - x1\n m = scipy.misc.imresize(m.astype(float), (h, w), interp='bilinear')\n mask[y1:y2, x1:x2, i] = np.where(m >= 128, 1, 0)\n return mask", "def simple(onArray, offArray):\n \n Larray = len(onArray)\n Larray2 = len(offArray)\n \n assert Larray == Larray2, \"both arrays should have the same size\"\n \n #onFiltered = numpy.array(onArray)[:,OnOff.misc.constants.dataRange]\n #offFiltered = numpy.array(offArray)[:,OnOff.misc.constants.dataRange]\n \n #return onFiltered,offFiltered,OnOff.misc.constants.dataRange\n drange = OnOffCalc.misc.getDatarange(onArray.shape[1])\n dataMask = numpy.ones(onArray.shape)\n 
#dataMask[:,OnOffCalc.misc.constants.dataRange] = 0\n dataMask[:,drange] = 0\n \n return dataMask", "def _find_masks(batch, min_size=10):\n result = []\n for b in batch:\n assert b.shape[0] == 1\n patch = b[0]\n z_sum = patch.sum(axis=(1, 2))\n coords = np.where(z_sum > min_size)[0]\n if len(coords) > 0:\n ind = coords[len(coords) // 2]\n result.append(b[:, ind:ind + 1, ...])\n else:\n ind = b.shape[1] // 2\n result.append(b[:, ind:ind + 1, ...])\n\n return np.stack(result, axis=0)", "def minimize_mask(bbox, mask, mini_shape):\n mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)\n for i in range(mask.shape[-1]):\n m = mask[:, :, i]\n y1, x1, y2, x2 = bbox[i][:4]\n m = m[y1:y2, x1:x2]\n if m.size == 0:\n raise Exception(\"Invalid bounding box with area of zero\")\n m = scipy.misc.imresize(m.astype(float), mini_shape, interp='bilinear')\n mini_mask[:, :, i] = np.where(m >= 128, 1, 0)\n return mini_mask", "def mask_full(self):\n size = self.size\n\n if self.name == \"KEEP\":\n m = np.zeros((2*size+1,size+5), dtype = np.int8)\n m[:size,:size] = np.ones((size,size)) # keep\n m[size:size+1,2:5] = np.ones((1,3)) # keepdoor\n m[size+1:2*size+1,:size] = np.ones((size,size)) # campfire\n m[2:7,size:size+5] = np.ones((5,5)) # stockpile\n elif self.name in [\"OIL_SMELTER\", \"ENGINEERS_GUILD\", \"TUNNELORS_GUILD\"]:\n m = np.ones((2*size, size), dtype=np.int8)\n elif self.name in [\"MERCENARY_POST\", \"BARRACKS\"]:\n m = np.ones((2*size, 2*size), dtype=np.int8)\n else:\n m = self.mask()\n return m", "def crop_to_nonzero(arrayin, mask=None):\r\n\r\n if type(arrayin) == np.ndarray :\r\n array = arrayin\r\n elif type(arrayin) == list :\r\n array = arrayin[0]\r\n\r\n if mask==None :\r\n mask = array\r\n #most left point \r\n for i in range(mask.shape[1]):\r\n tot = np.sum(np.abs(mask[:,i]))\r\n if tot > 0.0 :\r\n break\r\n left = i\r\n #most right point \r\n for i in range(mask.shape[1]-1,-1,-1):\r\n tot = np.sum(np.abs(mask[:,i]))\r\n if tot > 0.0 :\r\n break\r\n right = i\r\n #most up point \r\n for i in range(mask.shape[0]):\r\n tot = np.sum(np.abs(mask[i,:]))\r\n if tot > 0.0 :\r\n break\r\n top = i\r\n #most down point\r\n for i in range(mask.shape[0]-1,-1,-1):\r\n tot = np.sum(np.abs(mask[i,:]))\r\n if tot > 0.0 :\r\n break\r\n bottom = i\r\n if type(arrayin) == np.ndarray :\r\n arrayout = array[top:bottom+1,left:right+1]\r\n elif type(arrayin) == list :\r\n arrayout = []\r\n for i in arrayin :\r\n arrayout.append(i[top:bottom+1,left:right+1])\r\n return arrayout", "def whole_mask2mask(whole_mask, bbox):\n if len(whole_mask) != len(bbox):\n raise ValueError(\n 'The length of whole_mask and bbox should be the same')\n mask = list()\n for whole_m, bb in zip(whole_mask, bbox):\n bb = np.round(bb).astype(np.int32)\n mask.append(whole_m[bb[0]:bb[2], bb[1]:bb[3]])\n return mask", "def generate_mask(sequence_length):\n if FLAGS.mask_strategy == 'random':\n mask = []\n for seq in range(FLAGS.batch_size):\n p = np.random.choice(\n [True, False],\n size=[sequence_length[seq]],\n p=[FLAGS.is_present_rate, 1. 
- FLAGS.is_present_rate])\n while p.size<FLAGS.sequence_length:\n p = np.append(p, np.array([True]))\n mask.append(p)\n p = np.array(mask)\n\n elif FLAGS.mask_strategy == 'contiguous':\n mask = []\n for seq in range(FLAGS.batch_size):\n masked_length = int((1 - FLAGS.is_present_rate) * sequence_length[seq]) - 1\n # Determine location to start masking.\n start_mask = np.random.randint(\n 1, sequence_length[seq] - masked_length + 1, size=None)\n p = np.full([sequence_length[seq]], True, dtype=bool)\n #print(masked_length)\n # Create contiguous masked section to be False.\n p[start_mask:start_mask + masked_length] = False\n #print(p)\n\n while p.size<FLAGS.sequence_length:\n #print(p.size, FLAGS.sequence_length)\n #input('maskk')\n p = np.append(p, np.array([True]))\n #print(p)\n mask.append(p)\n p = np.array(mask) \n else:\n raise NotImplementedError\n\n return p", "def mask_neighbors(self, mask, rad=9, ptrn='r'):\n return um.mask_neighbors(mask, rad, ptrn)", "def makeSubapMap():\n a=numpy.zeros((sum(nsub),),numpy.int32)\n subFlag=subapFlag.copy()\n for i in range(NNGSCAM+NLGSOCAM+NBOBCAT):#ngs 1-3, truth, lgs, lofs, hofs\n tmp=subFlag[nsub[:i].sum():nsub[:i+1].sum()]\n tmp.shape=nsuby[i],nsubx[i]\n if i==NNGSCAM+NLGSOCAM:#lofs\n tmp[:]=sfNoObs*(i+1)\n elif i==1+NNGSCAM+NLGSOCAM:#hofs\n tmp[:]=sf14NoObs*(i+1)\n elif i==NNGSCAM:#lgs\n for j in range(4):\n jj=6-j\n tmp[j*2]=individualSubapFlag[jj]*(i+1)\n if j!=3:\n tmp[j*2+1]=individualSubapFlag[j]*(i+1)\n #jj=7-j\n #if jj<7:\n # tmp[j*2-1]=individualSubapFlag[jj]*(i+1)\n #tmp[j*2]=individualSubapFlag[j]*(i+1)\n else:\n tmp[:]=individualSubapFlag*(i+1)\n return subFlag", "def _source_mask(self, ilens):\n x_masks = make_non_pad_mask(ilens)\n return x_masks.unsqueeze(-2)", "def _create_observation_mask(self):\n\n\n if self.BLUE_PARTIAL:\n centers, radii = [], []\n for agent in self._team_blue:\n if not agent.isAlive: continue\n centers.append(agent.get_loc())\n radii.append(agent.range)\n self._blue_mask = self._create_vision_mask(centers, radii)\n if self.TEAM_MEMORY == \"fog\":\n self.blue_memory = np.logical_and(self.blue_memory, self._blue_mask)\n else:\n self._blue_mask = np.zeros_like(self._static_map, dtype=bool)\n\n if self.RED_PARTIAL:\n centers, radii = [], []\n for agent in self._team_red:\n if not agent.isAlive: continue\n centers.append(agent.get_loc())\n radii.append(agent.range)\n self._red_mask = self._create_vision_mask(centers, radii)\n if self.TEAM_MEMORY == \"fog\":\n self.red_memory = np.logical_and(self.red_memory, self._red_mask)\n else:\n self._red_mask = np.zeros_like(self._static_map, dtype=bool)", "def get_regions_mask(self, input):", "def reduce_X(X, mask):\n return X[:, mask]", "def idx_to_mask(idx, shape):\n output = np.zeros(shape)\n output[idx] = 1\n return output.astype(np.bool)", "def cmask(self):\n mask = np.zeros(18)\n if 'full' in self.CONS: mask[:] = 1\n if 'f0' in self.CONS: mask[0] = 1\n if 'f1' in self.CONS: mask[1:4] = 1\n if 'f2' in self.CONS: mask[4:10] = 1\n if 'vx' in self.CONS: mask[10] = 1\n if 'vy' in self.CONS: mask[11] = 1\n if 'vz' in self.CONS: mask[12] = 1\n if 'TG' in self.CONS: mask[13:18] = 1\n return mask>0", "def _get_one_fixed_mask(self, param_name):\n bounds = self._get_one_bound(param_name)\n return array([b == False for b in bounds])", "def offset_mask(mask):\n def axis_data(axis):\n \"\"\"Gets the bounds of a masked area along a certain axis\"\"\"\n x = mask.sum(axis)\n trimmed_front = N.trim_zeros(x,\"f\")\n offset = len(x)-len(trimmed_front)\n size = 
len(N.trim_zeros(trimmed_front,\"b\"))\n return offset,size\n\n xo,xs = axis_data(0)\n yo,ys = axis_data(1)\n\n array = mask[yo:yo+ys,xo:xo+xs]\n offset = (yo,xo)\n return offset, array", "def scatter_work(array, mpi_rank, mpi_size, root=0, dtype=np.int32):\n if mpi_rank == root:\n print(f\"Scattering array to {mpi_size} ranks\")\n scatter_total = array.size\n mod = scatter_total % mpi_size\n if mod != 0:\n print(\"Padding array for scattering...\")\n pad = -1 * np.ones(mpi_size - mod, dtype=dtype)\n array = np.concatenate((array, pad))\n scatter_total += mpi_size - mod\n assert scatter_total % mpi_size == 0\n assert scatter_total == array.size\n else:\n scatter_total = None\n\n scatter_total = comm.bcast(scatter_total, root=root)\n subset = np.empty(scatter_total//mpi_size, dtype=dtype)\n comm.Scatter(array, subset, root=root)\n\n return subset", "def subsetmask(df, mask_df):\r\n return pd.merge(df, mask_df, on=['lat', 'lon'], how='inner')", "def build_mask(dqarr, bitvalue):\n bitvalue = interpret_bit_flags(bitvalue, mnemonic_map=pixel)\n\n if bitvalue is None:\n return (np.ones(dqarr.shape, dtype=np.uint8))\n return np.logical_not(np.bitwise_and(dqarr, ~bitvalue)).astype(np.uint8)", "def getmask(\n self, mask: Union[List[dd.Series], UserList], inverse: bool = False\n ) -> List[dd.Series]:\n output = []\n for data, cond in zip(self.data, mask):\n if inverse:\n output.append(data[~cond])\n else:\n output.append(data[cond])\n\n return output", "def set_measurement_mask(self, program_name, mask_name, begins, lengths) -> Tuple[numpy.ndarray, numpy.ndarray]:", "def subsequent_mask(mask_size):\n mask_shape = (1, mask_size, mask_size)\n # Create a lower-triangle matrix at the primary diagonal (0th)\n # such that all the elements above the diagonal are 0.\n mask = np.tril(np.ones(mask_shape), k=0).astype('uint8')\n mask = torch.from_numpy(mask)\n return mask", "def _make_mask(data, mask_bounds):\n # For each set of bounds add to the conditional.\n mask = False\n for lat_bounds, lon_bounds in mask_bounds:\n mask |= _add_to_mask(data, lat_bounds, lon_bounds)\n return mask", "def edge_purity_mask(edge_index: nb.int64[:,:],\n part_ids: nb.int64[:],\n group_ids: nb.int64[:],\n primary_ids: nb.int64[:]) -> nb.boolean[:]:\n purity_mask = np.ones(len(edge_index), dtype=np.bool_)\n for g in np.unique(group_ids):\n group_mask = np.where(group_ids == g)[0]\n if np.sum(primary_ids[group_mask]) != 1 and len(np.unique(part_ids[group_mask][primary_ids[group_mask] == 1])) != 1:\n edge_mask = np.empty(len(edge_index), dtype=np.bool_)\n for k, e in enumerate(edge_index):\n edge_mask[k] = (e[0] == group_mask).any() & (e[1] == group_mask).any()\n purity_mask[edge_mask] = np.zeros(np.sum(edge_mask))\n\n return purity_mask", "def get_fg_mask(densepose_map, has_fg):\r\n if type(densepose_map) == list:\r\n return [get_fg_mask(label, has_fg) for label in densepose_map]\r\n if not has_fg or densepose_map is None:\r\n return 1\r\n if len(densepose_map.shape) == 5:\r\n densepose_map = densepose_map[:, 0]\r\n # Get the body part map from DensePose.\r\n mask = densepose_map[:, 2:3]\r\n\r\n # Make the mask slightly larger.\r\n mask = L.pool2d(mask, pool_size=15, pool_type='max', pool_stride=1, pool_padding=7)\r\n # mask = dg.to_variable(((mask > -1).numpy().astype(\"float32\")))\r\n mask = P.cast((mask > -1), \"float32\")\r\n return mask", "def binary_mask_fn(input_shape, masking, mask_state):\n if masking == 'channel':\n assert(input_shape[-1] % 2 == 0)\n sub_shape = np.copy(input_shape)\n sub_shape[-1] = sub_shape[-1] // 2\n 
binary_mask = np.concatenate([np.ones(sub_shape),\n np.zeros(sub_shape)],\n axis=-1)\n if masking == 'checkerboard':\n assert(len(input_shape) == 3)\n column_odd = [k % 2 for k in range(input_shape[-2])]\n column_even = [(k + 1) % 2 for k in range(input_shape[-2])]\n binary_mask = np.zeros((input_shape[-3], input_shape[-2]))\n for j in range(input_shape[-2]):\n if j % 2:\n binary_mask[:, j] = column_even\n else:\n binary_mask[:, j] = column_odd\n binary_mask = binary_mask.reshape(\n list(binary_mask.shape) + [1])\n binary_mask = np.repeat(binary_mask, input_shape[-1], axis=-1)\n\n binary_mask = binary_mask.reshape([1] + list(binary_mask.shape))\n if mask_state:\n return tf.cast(binary_mask, tf.float32)\n else:\n return tf.cast((1 - binary_mask), tf.float32)", "def _get_mask(self, anno, idx):\n coco = self.coco\n img_info = coco.loadImgs(self.img_ids[idx])[0]\n\n m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)\n\n for obj in anno:\n if 'segmentation' in obj:\n if obj['iscrowd']:\n rle = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n m += pycocotools.mask.decode(rle)\n elif obj['num_keypoints'] == 0:\n rles = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n for rle in rles:\n m += pycocotools.mask.decode(rle)\n\n return m < 0.5", "def _get_mask(self, struct1, struct2, fu, s1_supercell):\n mask = np.zeros((len(struct2), len(struct1), fu), dtype=np.bool)\n\n inner = []\n for sp2, i in itertools.groupby(enumerate(struct2.species_and_occu),\n key=lambda x: x[1]):\n i = list(i)\n inner.append((sp2, slice(i[0][0], i[-1][0]+1)))\n\n for sp1, j in itertools.groupby(enumerate(struct1.species_and_occu),\n key=lambda x: x[1]):\n j = list(j)\n j = slice(j[0][0], j[-1][0]+1)\n for sp2, i in inner:\n mask[i, j, :] = not self._comparator.are_equal(sp1, sp2)\n\n if s1_supercell:\n mask = mask.reshape((len(struct2), -1))\n else:\n # supercell is of struct2, roll fu axis back to preserve\n # correct ordering\n mask = np.rollaxis(mask, 2, 1)\n mask = mask.reshape((-1, len(struct1)))\n\n # find the best translation indices\n i = np.argmax(np.sum(mask, axis=-1))\n inds = np.where(np.invert(mask[i]))[0]\n if s1_supercell:\n # remove the symmetrically equivalent s1 indices\n inds = inds[::fu]\n return np.array(mask, dtype=np.int_), inds, i", "def gen_mask(components):\n masks_segments = components[7]\n hh = components[1]\n ww = components[2]\n\n if masks_segments:\n mask_miss = np.ones((hh, ww), dtype=np.uint8)\n for seg in masks_segments:\n bin_mask = maskUtils.decode(seg)\n bin_mask = np.logical_not(bin_mask)\n mask_miss = np.bitwise_and(mask_miss, bin_mask)\n\n components[11] = mask_miss\n\n return components", "def minimumCostMask(Ref, B1, B2, overlap_type, overlap_size):\n ref_mask = np.ones(Ref.shape)\n #vertical\n if overlap_type=='v':\n arr = np.power(B1[:,-overlap_size:]-Ref[:,0:overlap_size], 2).tolist()\n ref_mask[:,0:overlap_size] = minimumCostPathOnArray(arr)\n\n #horizontal\n elif overlap_type=='h':\n arr = np.power(B2[-overlap_size:, :]-Ref[0:overlap_size, :], 2)\n arr = arr.transpose()\n arr = arr.tolist()\n ref_mask[0:overlap_size,:] = minimumCostPathOnArray(arr).transpose()\n #both\n elif overlap_type=='b':\n # Vertical overlap\n arrv = np.power(B1[:,-overlap_size:]-Ref[:,0:overlap_size], 2).tolist()\n ref_mask[:,0:overlap_size] = minimumCostPathOnArray(arrv)\n # Horizontal overlap\n arrh = np.power(B2[-overlap_size:, :]-Ref[0:overlap_size, :], 2)\n arrh = arrh.transpose()\n arrh 
= arrh.tolist()\n ref_mask[0:overlap_size,:] = ref_mask[0:overlap_size,:]*(minimumCostPathOnArray(arrh).transpose())\n # To ensure that 0's from previous assignment to ref_mask remain 0's\n else:\n print(\"Error in min path\")\n\n return ref_mask", "def apply_mask(X: np.ndarray,\n mask_size: tuple = (4, 4),\n n_masks: int = 1,\n coord: tuple = None,\n channels: list = [0, 1, 2],\n mask_type: str = 'uniform',\n noise_distr: tuple = (0, 1),\n noise_rng: tuple = (0, 1),\n clip_rng: tuple = (0, 1)\n ) -> Tuple[np.ndarray, np.ndarray]:\n X_shape = X.shape\n\n # initialize mask\n if mask_type != 'zero':\n mask = np.zeros((n_masks,) + X_shape[1:])\n elif mask_type == 'zero':\n mask = np.ones((n_masks,) + X_shape[1:])\n else:\n raise ValueError('Only `normal`, `uniform` and `zero` masking available.')\n\n # create noise for mask\n if mask_type == 'normal':\n noise = np.random.normal(loc=noise_distr[0], scale=noise_distr[1], size=(n_masks,) + mask_size)\n elif mask_type == 'uniform':\n noise = np.random.uniform(low=noise_rng[0], high=noise_rng[1], size=(n_masks,) + mask_size)\n\n # find upper left coordinate for mask\n if coord is None:\n x_start = np.random.randint(0, X_shape[1] - mask_size[0], n_masks)\n y_start = np.random.randint(0, X_shape[2] - mask_size[1], n_masks)\n else:\n x_start, y_start = coord\n\n # update masks\n for _ in range(x_start.shape[0]):\n\n if mask_type == 'zero':\n update_val = 0\n else:\n update_val = noise[_]\n\n for c in channels:\n mask[\n _,\n x_start[_]:x_start[_] + mask_size[0],\n y_start[_]:y_start[_] + mask_size[1],\n c\n ] = update_val\n\n # apply masks to instances\n X_mask = []\n for _ in range(X_shape[0]):\n if mask_type == 'zero':\n X_mask_ = X[_].reshape((1,) + X_shape[1:]) * mask\n else:\n X_mask_ = np.clip(X[_].reshape((1,) + X_shape[1:]) + mask, clip_rng[0], clip_rng[1])\n X_mask.append(X_mask_)\n X_mask = np.concatenate(X_mask, axis=0)\n\n return X_mask, mask", "def mask(self):\n return list(self._mask_generator())", "def test_build_mask(dq, bitvalues, expected):\n result = build_mask(dq, bitvalues)\n assert np.array_equal(result, expected)", "def construct_by_element_mask(cls, mask):\n mask = np.asarray(mask)\n ue = np.unique(mask)\n if not (len(mask.shape) == 2 and len(ue) == 2 and 0 in ue and 1 in ue):\n raise ValueError(\"The mask matrix should be a 2d array, and there must be only \"\n \"1 and 0 in the matrix, in which, 1 means the corresponding \"\n \"element is known, and will be added to the MultiLabelIndexCollection container.\")\n\n nz_row, nz_col = np.nonzero(mask)\n return cls(data=[(nz_row[i], nz_col[i]) for i in range(len(nz_row))], label_size=mask.shape[1])", "def _mask(self):\n if self.__mask is None:\n # need this to be *exactly* the numpy boolean False\n return nomask\n return self.__mask", "def mask(self) -> list[int]:\n return self._mask", "def create_mask(neurons: list):\n\n return np.zeros_like(neurons[0])", "def _select_masks_to_boundaries(self, select_masks):\n\n def _merge(node_covers, select_idx):\n \"\"\"\n node_covers [[s1, e1], [s2, e2], ..., [sn, en]]\n \"\"\"\n new_node_covers = []\n for i in range(len(node_covers) - 1):\n if i == select_idx:\n merge_node_boundary = [node_covers[select_idx][0], node_covers[select_idx + 1][1]]\n new_node_covers.append(merge_node_boundary)\n elif i < select_idx:\n new_node_covers.append(node_covers[i])\n else:\n new_node_covers.append(node_covers[i + 1])\n return new_node_covers, merge_node_boundary\n batch_size = select_masks[0].size()[0]\n max_length = select_masks[0].size()[1] + 1\n 
combine_matrix = torch.rand(batch_size, max_length, 2).long()\n for j in range(max_length):\n combine_matrix[:, j, :] = j\n results = []\n for batch_idx in range(batch_size):\n node_covers = combine_matrix[batch_idx, :, :].numpy().tolist()\n result = []\n for node_idx in range(max_length - 2):\n select = select_masks[node_idx][batch_idx, :]\n select_idx = torch.nonzero(select).data[0][0]\n node_covers, merge_boundary = _merge(node_covers, select_idx)\n result.append(merge_boundary)\n results.append(result)\n results = torch.LongTensor(results)\n return results", "def __call__(self, image: np.ndarray) -> np.ndarray:\n # convert PIL image to numpy array\n image = np.asarray(image)\n\n # get masks, all pixels\n np_mask = np.array(np.ones(image.shape[0:2]), dtype=bool)\n\n return np_mask", "def intersect(arr, mask, label=None, substitution=np.nan):\n assert arr.shape == mask.shape\n\n if label is None:\n mask_idx_mat = mask != 0\n else:\n mask_idx_mat = mask == label\n\n if substitution == 'min':\n substitution = np.min(arr[mask_idx_mat])\n elif substitution == 'max':\n substitution = np.max(arr[mask_idx_mat])\n\n new_arr = arr.copy()\n new_arr[np.logical_not(mask_idx_mat)] = substitution\n return new_arr" ]
[ "0.6550779", "0.6427817", "0.6351284", "0.6048119", "0.5883029", "0.5829637", "0.58208704", "0.58193535", "0.5818369", "0.58154434", "0.57871544", "0.5785224", "0.57692295", "0.5747028", "0.5712273", "0.5712273", "0.56396306", "0.5627762", "0.5626429", "0.56193084", "0.5568146", "0.5546961", "0.5544342", "0.5540344", "0.5531479", "0.55271965", "0.55250233", "0.5493916", "0.5484911", "0.5482494", "0.54784495", "0.54705185", "0.5467018", "0.5438916", "0.5433851", "0.54282534", "0.54195946", "0.5405341", "0.5391081", "0.53799915", "0.53749454", "0.53723454", "0.53630453", "0.5352745", "0.5342755", "0.5342263", "0.5342025", "0.53366435", "0.5326283", "0.5320846", "0.53201157", "0.5316891", "0.5299593", "0.5296745", "0.52915406", "0.5288943", "0.52877927", "0.5284323", "0.52760285", "0.5275447", "0.5273742", "0.5264043", "0.52637106", "0.5262991", "0.5254595", "0.5253162", "0.5241319", "0.523878", "0.52362424", "0.5233947", "0.5231587", "0.52253133", "0.5221042", "0.52166194", "0.52157843", "0.5214885", "0.5211688", "0.51979554", "0.5194427", "0.51902133", "0.5180999", "0.5178121", "0.5177609", "0.5170207", "0.5168506", "0.51660013", "0.51533526", "0.51531446", "0.51448715", "0.51417994", "0.5139486", "0.51355314", "0.5134391", "0.51343197", "0.5127023", "0.5126378", "0.5125706", "0.51252514", "0.5119685", "0.51090896", "0.5108487" ]
0.0
-1
Return rules for checking.
def rules(cls): rules_CityscapesConfig = {"batch_size": {"type": int}, "root_path": {"type": str}, "num_parallel_batches": {"type": int}, "fixed_size": {"type": bool} } return rules_CityscapesConfig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rules(cls):\n raise NotImplementedError()", "def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules", "def get_rules(self):\n rules = []\n for item in self.rule:\n rules.append(item)\n return rules", "def get_rules(self):\n # TODO: Implement\n self.traverse2(self.tree, [])\n return self.rules", "def rules(self):\n return tuple(e for e in self.entries if e.is_rule)", "def get_rules(cls) -> list:\n return [factory() for factory in cls._rules_factories]", "def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def rules(cls):\n rules_Cityscapes = {\"common\": {\"type\": dict},\n \"train\": {\"type\": dict},\n \"val\": {\"type\": dict},\n \"test\": {\"type\": dict}\n }\n return rules_Cityscapes", "def getListOfRules(self):\n return self.model.getListOfRules()", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperationsPatch']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperationsPatch']]:\n return pulumi.get(self, \"rules\")", "def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())", "def rule_conditions(self) -> pulumi.Output[Sequence['outputs.RuleRuleCondition']]:\n return pulumi.get(self, \"rule_conditions\")", "def filter_rules(self) -> list:\n return self.transform(self._tree), self._rules", "def hrules(self):\n ...", "def rule_conditions(self) -> pulumi.Input[Sequence[pulumi.Input['RuleRuleConditionArgs']]]:\n return pulumi.get(self, \"rule_conditions\")", "def rule_conditions(self) -> Sequence['outputs.GetRulesRuleRuleConditionResult']:\n return pulumi.get(self, \"rule_conditions\")", "def rules(self) -> List['outputs.PreventionInspectTemplateInspectConfigRuleSetRule']:\n return pulumi.get(self, \"rules\")", "def get_rules(self):\n return [phi for psi in self._Psi for phi in psi]", "def rules(self) -> FrozenOrderedSet[Union[Callable, Rule]]:\n return self._rules", "def test_rules():", "def getListOfRules(self, *args):\n return _libsbml.Model_getListOfRules(self, *args)", "def check_rules(self):\n conditions = ('element', 'name', 'type')\n metric = set(['type', 'value'])\n \n elements = {}\n for rule in self.rules:\n if not 
isinstance(rule, dict):\n self.fail(msg='Rule format is not type dict: %s, type: %s'\n % (rule, type(rule)))\n if 'name' not in rule:\n self.fail(msg='Name is a required field for all rules')\n if 'match_condition' in rule:\n if not isinstance(rule['match_condition'], list):\n self.fail(msg='Match condition is expected to be a list')\n\n for match in rule['match_condition']:\n if match.get('type') == 'metric':\n if set(match.keys()) ^ metric:\n self.fail(msg='Metric definition can only have '\n 'values: %s, given: %s' % (list(metric), match))\n continue\n # Validate fields in condition\n for field in conditions:\n if field not in match:\n self.fail(msg='Match condition is missing a required '\n 'key: %r ,required: %s' % (match, list(conditions)))\n if field == 'element' and match.get(field) not in access_list:\n self.fail(msg='Match condition element is not valid: %s, '\n 'valid types: %s' % (match.get(field), list(access_list)))\n elif field == 'type' and match[field] not in match_conditions:\n self.fail(msg='Match condition type is not valid: %s, '\n 'valid types: %s' % (match[field], list(match_conditions)))\n \n element = match.get('element')\n # peer_address can only be type engine or external_bgp_peer\n if match['type'] == 'peer_address' and element not \\\n in ('engine', 'external_bgp_peer'):\n self.fail(msg='A peer address element can only be of type '\n 'engine or external_bgp_peer, provided definition: %s' % match)\n elif match['type'] == 'next_hop' and ('prefix_list' not in \\\n element and 'access_list' not in element):\n self.fail(msg='A next hop definition must be either an access '\n 'list or prefix list type, provided defintion: %s' % match)\n \n if 'engine' in element:\n element = 'single_fw,fw_cluster,virtual_fw'\n elements.setdefault(\n element, set([])).add(match.get('name'))\n \n return [elements] if elements else []", "def effective_rules(self) -> pulumi.Output[Sequence[Any]]:\n return pulumi.get(self, \"effective_rules\")", "def vrules(self):\n ...", "def rules(self):\n return self._alert_rules_client", "def extract_rules(self, labels=None):\n # Extract flat list of rules in array form\n if isinstance(self.base_estimator, RandomForestClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_]))\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules(__dt) for dt in self.base_estimator.estimators_.ravel()]))\n elif isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for dt in self._rule_dump]))\n \n # Convert each sub-rule into text, join together with '&' and then add to rules\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=labels, scaler=self.ext_scaler)) for r in rules])\n \n return self.rules", "def checks(self):\r\n return checks.Checks(self)", "def create_url_rules(self):\n return []", "def game_rules(self):\n self.valid_answers = dict()\n\n yield ScoreBoard(win=0, lose=0, tie=0)\n yield Results(winner='rock👊', loser='scissors✌', why='Rock👊 smashes scissors✌')\n yield Results(winner='paper👋', loser='rock👊', why='Paper👋 covers rock👊')\n yield Results(winner='scissors✌', loser='paper👋', why='Scissors✌ cut paper👋')\n yield ValidAnswer(answer='rock👊', key='r')\n yield ValidAnswer(answer='paper👋', key='p')\n yield ValidAnswer(answer='scissors✌', key='s')", "def items(self):\n return self._rules_by_lhs.items()", "def conditions(self):\n return 
self._separated_constructs(RuleCondition)", "def rules(self, transfer, robot_settings, dilution_settings):\n return []", "def getAllDecisionRules(self):\n\n #check this shit lol?\n thetas = self.getAllTheta()\n human_actions = self.getAllHumanActions()\n return [list(zip(thetas, item)) for item in itertools.product(human_actions, repeat=len(thetas))]", "def get_conditional_rules(self):\n conditional_rules = []\n\n for field in self.form.get_prep_value():\n\n rules = field['value'].get('rules', None)\n if rules:\n field_id = field['value'].get('field_id', None)\n if field_id:\n rules['field_name'] = field_id\n else:\n rules['field_name'] = clean_form_field_name(field['value']['label'])\n rules['required'] = field['value'].get('required', False)\n rules['field_type'] = field.get('type', None)\n conditions = rules.get('conditions', None)\n if len(conditions):\n for condition in conditions:\n del(condition['id'])\n del(condition['type'])\n condition['field_name'] = clean_form_field_name(condition['value']['field_name'])\n condition['rule'] = condition['value']['rule']\n condition['value'] = condition['value'].get('value', None)\n\n conditional_rules.append(rules)\n\n return conditional_rules", "def get_rules(self, M):\n return list(itertools.chain.from_iterable(\n list(itertools.chain.from_iterable(\n [[self.get_boxrules(x, M), self.get_unaryrules(x, M),\n self.at_least_one_rules(x, M), self.get_columnrules(x, M),\n self.get_rowrules(x, M)] for x in itertools.product(range(1, M+1),\n range(1, M+1))]\n ))\n ))", "def rules(self) -> pulumi.Input[Sequence[pulumi.Input['BucketLifecycleConfigurationV2RuleArgs']]]:\n return pulumi.get(self, \"rules\")", "def getRules(self):\n self._rules = {}\n _RuleSet = self._sets[self._currentSet - 1 ]\n for oneSet in _RuleSet :\n \n if len(oneSet) < 2 : \n pass \n \n for x in range(1, max(floor(len(oneSet) / 2),2) ):\n \n comb = combinations(oneSet, x)\n for item in comb:\n remaining = tuple(x for x in oneSet if x not in item)\n self._rules[(item,remaining)] = 0\n self._rules[(remaining,item)] = 0", "def extract_rule_names(self):\n if self.scanner == YARA:\n return sorted({result['rule'] for result in self.results})\n if self.scanner == CUSTOMS and 'matchedRules' in self.results:\n return self.results['matchedRules']\n # We do not have support for the remaining scanners (yet).\n return []", "def compliances(self) -> Sequence['outputs.GetRulesRuleComplianceResult']:\n return pulumi.get(self, \"compliances\")", "def get_rules(app):\n rules = [\n Rule('/', endpoint='home', handler='apps.busstopped.handlers.MainPage'),\n Rule('/ajax/busstopped/<line>/<direction>', endpoint='ajax-busstopped', handler='apps.busstopped.handlers.AjaxGetBusStopped'),\n Rule('/ajax/point', endpoint='ajax-point', handler='apps.busstopped.handlers.AjaxGetBusStopTimes'),\n Rule('/ajax/getbuspaths', endpoint='ajax-getbuspath', handler='apps.busstopped.handlers.AjaxGetBusPath'),\n Rule('/faq', endpoint='faq', handler='apps.busstopped.handlers.FAQPage'),\n Rule('/changelog', endpoint='change-log', handler='apps.busstopped.handlers.ChangeLogPage'),\n Rule('/info', endpoint='info', handler='apps.busstopped.handlers.InfoPage'),\n Rule('/addpoint', endpoint='add_point', handler='apps.busstopped.handlers.AddPointDocPage'),\n Rule('/news', endpoint='news', handler='apps.busstopped.handlers.NewsPage'),\n Rule('/parse', endpoint='parse', handler='apps.busstopped.handlers.ParseTimesPage'),\n ]\n\n return rules", "def get(self, *args):\n return _libsbml.ListOfRules_get(self, *args)", "def 
test_list_rules(self):\n pass", "def rules(cls):\n rules_CityscapesValConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesValConfig", "def get_all_rules(self):\n\n rules = set()\n for a_dict in self.get_dicts():\n rules = keywords.union(a_dict['rules'])\n return sorted(keywords)", "def _apply_commit_rules(rules, commit):\n all_violations = []\n for rule in rules:\n violations = rule.validate(commit)\n if violations:\n all_violations.extend(violations)\n return all_violations", "def rules(self) -> pulumi.Output[Sequence['outputs.BucketLifecycleConfigurationV2Rule']]:\n return pulumi.get(self, \"rules\")", "def get_rules():\n rules = []\n\n for app_module in get_config('tipfy', 'apps_installed'):\n try:\n # Load the urls module from the app and extend our rules.\n app_rules = import_string('%s.urls' % app_module)\n rules.extend(app_rules.get_rules())\n except ImportError:\n pass\n\n return rules", "def test_a1_check_rules(self):\n # Has rule\n rule = logic.check_rules(1, 1)\n self.assertEqual(rule, 1)\n rule = logic.check_rules(1, 2)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(1, 4)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(0, 3)\n self.assertEqual(rule, 4)\n rule = logic.check_rules(1, 8)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(1, 0)\n self.assertEqual(rule, 1)\n\n # No rule match\n rule = logic.check_rules(0, 1)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(0, 0)\n self.assertEqual(rule, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, 9)", "def get_rule_names(self):\n return self.rules.keys()", "def apply_rules(self):\n if len(self.rules) == 0:\n return\n for gene in self.population:\n for rule in self.rules:\n if rule.type == \"gene\":\n rule.check_and_apply(gene)", "def getChecks(self):\r\n raise AbstractError\r\n return []", "def rules(cls):\n rules_CityscapesTestConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesTestConfig", "def policy_rules(self) -> Sequence[Any]:\n return pulumi.get(self, \"policy_rules\")", "def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketLifecycleConfigurationV2RuleArgs']]]]:\n return pulumi.get(self, \"rules\")", "def __init__(self, rules):\n self.rules = rules\n\n self._rhs_rules = defaultdict(list)\n self._rhs_unary_rules = defaultdict(list)\n\n self._nonterm = set(rule.lhs for rule in rules)\n self._term = set(token for rhs in chain(rule.rhs for rule in rules)\n for token in rhs if token not in self._nonterm)\n\n for rule in rules:\n _, rhs, _ = rule\n self._rhs_rules[rhs].append(rule)\n\n for rhs_rules in self._rhs_rules.values():\n rhs_rules.sort(key=lambda r: r.log_prob, reverse=True)\n\n self._is_cnf = all(len(rule.rhs) == 1\n or (len(rule.rhs) == 2\n and all(s in self._nonterm for s in rule.rhs))\n for rule in self.rules)", "def create_rules(self, grids):\n from ..models import Rule\n\n alts = [grid.get_alternative_total_rating_tuples() for grid in grids]\n combinations = itertools.product(*alts)\n rules = []\n for combi in combinations:\n alts, ratings = zip(*combi)\n rules.append(Rule(alts, sum(ratings)))\n\n rules.sort()\n\n return rules", "def make_all_rules(self):\n\n def compatible(pattern1, pattern2, direction):\n \"\"\"Returns `True` if `pattern2` is compatible with `pattern1` in the `direction`,\n otherwise return `False`.\"\"\"\n if direction == 0:\n return pattern1[:-1] == 
pattern2[1:]\n if direction == 2:\n return [line[:-1] for line in pattern1] == [line[1:] for line in pattern2]\n\n for index in range(len(self.patterns)):\n for ind in range(index + 1):\n for direction in (0, 2):\n if compatible(self.patterns[index], self.patterns[ind], direction):\n self.rules[index][direction].add(ind)\n self.rules[ind][direction + 1].add(index)", "def rules(cls):\n rules_CityscapesTrainConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesTrainConfig", "def rules(self):\r\n return Acls(self)", "def rule_conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleRuleConditionArgs']]]]:\n return pulumi.get(self, \"rule_conditions\")", "def _compile_rules(self):\n for state, table in self.RULES.items():\n patterns = []\n actions = []\n nextstates = []\n for i, row in enumerate(table):\n if len(row) == 2:\n pattern, action_ = row\n nextstate = None\n elif len(row) == 3:\n pattern, action_, nextstate = row\n else:\n fstr = \"invalid RULES: state {}, row {}\"\n raise CompileError(fstr.format(state, i))\n patterns.append(pattern)\n actions.append(action_)\n nextstates.append(nextstate)\n reobj = re.compile(\"|\".join(\"(\" + p + \")\" for p in patterns))\n self._rules[state] = (reobj, actions, nextstates)", "def get_real_rules():\n real = {}\n\n for name, rule in RULES.items():\n q = GraphMetric.select(GraphMetric.metric).where(\n GraphMetric.metric % name).group_by(GraphMetric.metric)\n\n for i in q:\n real[i.metric] = rule\n return real", "def eqv_path_rules(self) -> List[Tuple[CombinatorialClassType, Rule]]:\n eqv_path_rules = []\n curr = self.comb_class\n for rule in self.rules:\n eqv_path_rules.append((curr, rule))\n curr = rule.children[0]\n return eqv_path_rules", "def generate_rules(self):\n for rule in self._parser.conditionals:\n\n all_in_facts, matrix = self._generate_rules(rule)\n if all_in_facts is True:\n self.new_fact_from_facts(rule)\n else:\n facts = self._parser.conditionals[rule][1]\n #print(rule, facts, matrix)", "def find_matching_rules(self, requested_uri):\n rulesfound = []\n for rule in RewriteRule.objects.filter(register=self) :\n (matched,nestedrule) = rule.match_inheritance( requested_uri)\n if matched :\n rulechain = (rule,) + nestedrule.get_subrules() + (nestedrule,)\n rulesfound.append(rulechain)\n # get all sub rules not bound to a register \n return rulesfound", "def config_rules(self) -> Sequence['outputs.GetCompliancePacksPackConfigRuleResult']:\n return pulumi.get(self, \"config_rules\")", "def rule_list():\r\n #check RIGHT and DOWN borders\r\n all_blocks = get_blocks(-1)\r\n allowed = {}\r\n for i in range(len(all_blocks)): #index\r\n for j in range(len(all_blocks)):\r\n #check RIGHT border\r\n allowed[(i,j)] = [False,False]\r\n if all_blocks[i][1][2] == all_blocks[j][1][0]:\r\n allowed[(i,j)][0] = True\r\n #check DOWN border\r\n if all_blocks[i][2][1] == all_blocks[j][0][1]:\r\n allowed[(i,j)][1] = True\r\n return allowed", "def make_rules(UI):\n \n Conditionals = Conditional_Database(UI)\n location = UI.location\n \n \n Rules = []\n if location in ['Rio de Janeiro']:\n\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Br_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Br_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Br_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some 
Businesses', 4, \n func = lambda policy_input: Conditionals.Br_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Br_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Br_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Br_Rule7func(policy_input)))\n\n elif location in ['Indonesia']:\n #National\n Rules.append(SDlib.Rule('Implement Some Restrictions Nationwide', 1, \n func = lambda policy_input: Conditionals.In_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Nationwide', 2, \n func = lambda policy_input: Conditionals.In_Rule2func(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Nationwide', 3, \n func = lambda policy_input: Conditionals.In_Rule3func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Nationwide', 4, \n func = lambda policy_input: Conditionals.In_Rule4func(policy_input))) \n #Java\n Rules.append(SDlib.Rule('Implement Some Restrictions Java - Zonal', 5, \n func = lambda policy_input: Conditionals.In_Rule1func_j(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Java - Zonal', 6, \n func = lambda policy_input: Conditionals.In_Rule2func_j(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Java - Zonal', 7, \n func = lambda policy_input: Conditionals.In_Rule3func_j(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Java - Zonal', 8, \n func = lambda policy_input: Conditionals.In_Rule4func_j(policy_input))) \n\n #Sulawesi\n Rules.append(SDlib.Rule('Implement Some Restrictions Sulawesi - Zonal', 9, \n func = lambda policy_input: Conditionals.In_Rule1func_s(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Sulawesi - Zonal', 10, \n func = lambda policy_input: Conditionals.In_Rule2func_s(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Sulawesi - Zonal', 11, \n func = lambda policy_input: Conditionals.In_Rule3func_s(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Sulawesi - Zonal', 12, \n func = lambda policy_input: Conditionals.In_Rule4func_s(policy_input))) \n\n elif location in ['Chile']:\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Ch_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Ch_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Ch_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Ch_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Ch_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Ch_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Ch_Rule7func(policy_input)))\n \n elif location in ['Santiago']:\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Sa_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda 
policy_input: Conditionals.Sa_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Sa_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Sa_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Sa_Rule5func(policy_input)))\n \n if location in ['Querétaro']:\n\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Br_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Br_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Br_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Br_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Br_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Br_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Br_Rule7func(policy_input)))\n\n return Rules", "def test_all_rules_are_tested():\n tested_rules = defaultdict(list)\n for test in TESTS:\n cls = test[0]\n rule = test[1]\n tested_rules[cls].append(rule)\n for cls in tested_rules.keys():\n if hasattr(cls, '_binary_rules'):\n rules = set(cls._binary_rules.keys())\n elif hasattr(cls, '_rules'):\n rules = set(cls._rules.keys())\n assert set(tested_rules[cls]) == rules", "def match_rules(rules, wm):\n res = []\n for r in rules:\n new_patterns = match_rule(r[0],r[1],r[2], wm)\n if new_patterns:\n print(\" Match succeeds\")\n print(\" Adding assertions to WM\")\n else:\n print(\" Match fails\")\n for n in new_patterns:\n if (n not in wm) and (n not in res):\n print(\" \",n)\n res.append(n)\n # print(\"new patterns so far = \", res)\n # print()\n # for testing\n # break\n return res", "def evaluate(self, request_info: RequestInfo):\n rule_results = [(rule, rule.matches(request_info)) for rule in self.rules]\n\n overriding_blocking_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.DENY and result == MatchResult.OVERRIDE\n ]\n overriding_allowing_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.ALLOW and result == MatchResult.OVERRIDE\n ]\n\n if len(overriding_allowing_rules) > 0:\n return Action.ALLOW, overriding_allowing_rules\n\n if len(overriding_blocking_rules) > 0:\n return Action.DENY, overriding_blocking_rules\n\n blocking_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.DENY and result == MatchResult.MATCH\n ]\n allowing_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.ALLOW and result == MatchResult.MATCH\n ]\n\n if len(allowing_rules) > 0:\n return Action.ALLOW, allowing_rules\n\n if len(blocking_rules) > 0:\n return Action.DENY, blocking_rules\n\n return Action.NOOP, None", "def get_searchable_rules(rules):\n searchable_rules = {rule.variable: {} for rule in rules}\n for rule in rules:\n searchable_rules[rule.variable][tuple(rule.derivation)] = rule\n return searchable_rules", "def get_rules_for_action(self, action_type: ActionType) -> List[\"Rule\"]:\n return [rule 
for rule in self.rules if rule.action_type == action_type]", "def get_rules(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetRulesV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetRulesV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "def getRule(self, *args):\n return _libsbml.Model_getRule(self, *args)", "def rules_for_request(request):\n surt = request.GET.get('surt')\n warc = request.GET.get('warc')\n capture_date = int(request.GET.get('capture-date')) # XXX try/except\n if surt is None or warc is None or capture_date is None:\n return error('surt, warc, and capture-date query string params'\n ' are all required', {})\n surt = Surt(surt)\n tree = tree_for_surt(surt)\n warc_parts = warc.split('-') # XXX validate warc name\n warc_date = int(warc_parts[4][0:-5]) # Parse an int out of the date minus ms\n applicable_rules = []\n for rule in tree:\n start = int(rule.capture_start.strftime('%Y%m%d%H%M'))\n end = int(rule.capture_end.strftime('%Y%m%d%H%M'))\n if ((warc_date > start and warc_date < end) and\n (capture_date > start and capture_date < end)):\n applicable_rules.append(rule)\n # Here is where we would make a surface-level decision on the action to\n # be taken (block, auth, allow, rewrite, etc). A point of optimization would\n # be to use django to only select the rules matching the date range, but for\n # now, we select the whole tree. Also, date comparisons would probably be\n # faster than coercing to strings, then to ints, but I was running short\n # on time.\n return success([rule.summary() for rule in applicable_rules])", "def getSpecRules(self, rhs):\n if rhs not in self.itemSet:\n print('Please input a term contain in the term-set !')\n return None\n \n rules = dict()\n for key, value in self.freqSet.items():\n for item in value:\n if rhs.issubset(item) and len(item) > 1:\n item_supp = self.getSupport(item)\n item = item.difference(rhs)\n conf = item_supp / self.getSupport(item)\n if conf >= self.minConf:\n rules[item] = conf\n return rules", "def rules():\r\n print(\"\\n\\\r\n \\tMmmmm... Si vous etes ici, j'imagine que votre décision est prise !\\n\\\r\n Je ne veux pas vous inciter à arreter mais vous etes conscient des risques ?\\n\\\r\n Vous participez quand meme au plus grand combat de tous les temps...\\n\\n\\\r\n \\tBon je vous explique les règles : Vous allez affronter un autre titan en duel\\n\\\r\n Vous avez chacun des tentatives d'infliger une attaque. Si elles réussissent,\\n\\\r\n Vous aurez le choix d'infliger entre 0 et 100 dégats. 
Enfin non puisque tout\\n\\\r\n sera tiré au sort :)\\n\\\r\n La partie s'arrete quand l'un des deux titans est mort\\n\\\r\n \\tBonne chance a vous!\\n\")", "def _get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def validate(self):\n self._validate_rule(self.rules, allow_self_reference=self.allow_self_reference)\n self._validate_operator_arguments(self.rules)\n return self.errors", "def custom_rules(self) -> pulumi.Output[Optional[Sequence['outputs.FirewallPolicyCustomRule']]]:\n return pulumi.get(self, \"custom_rules\")", "def compile_rule(self, cmd):\n cfg, rules = self.config, self.rules\n if cmd == None:\n return Rule()\n if isinstance(cmd, dict):\n r = []\n if 'as' in cmd:\n r += [self.compile_rule(cmd['as'])]\n if 'find' in cmd:\n r += [FindRule(cmd['find'], self)]\n if 'split' in cmd:\n c = cmd['split']\n if 'by' not in c:\n raise ConfigError('\"split.by\" is not defined!')\n if 'as' not in c:\n raise ConfigError('\"split.as\" is not defined!')\n return SplitRule(c['by'], self.compile_rule(c['as']), self)\n if 'count' in cmd:\n r += [CountRule(cmd['count'], self)]\n if 'group' in cmd:\n r += [GroupRule(cmd['group'], self)]\n if len(r) == 0:\n return Rule()\n return AndRule(r) if len(r) > 1 else r[0]\n if isinstance(cmd, list):\n return AndRule([self.compile_rule(c) for c in cmd])\n if cmd[0] == '?':\n return FindRule(cmd[1:], self)\n if cmd[0] == '$':\n #reference\n key = cmd[1:]\n if key in rules:\n return rules[key]\n if key not in cfg:\n raise ConfigError('Reference \"%s\" not defined!' 
% cmd)\n if key in self.ref:\n raise ConfigError('Recursively reference to key \"%s\"' % key)\n self.ref.add(key)\n rules[key] = self.compile_rule(cfg[key])\n return rules[key]\n return AsRule(cmd, self)", "def _get_all_checks(self):\n this_class = self.__class__\n\n check_list = [\n getattr(self, func)\n for func in dir(self.__class__)\n if callable(getattr(this_class, func))\n and func.startswith(self.check_prefix)\n ]\n\n return check_list", "def get_rules(self, obj, current_path=[]):\n # If node isn't a rule or dictionary\n if type(obj) != dict:\n return []\n\n # If node is a rule return its location and its details\n if self.is_rule(obj):\n return [([self.field] + current_path, obj)]\n\n rules = []\n for path, val in obj.items():\n rules = rules + self.get_rules(val, current_path + [path])\n return rules", "def get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n version = version.replace(\"+incompatible\", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def rules():\n print \"Loading Rules\"\n return render_template(\"rules.html\")", "def parseFile(file,rules = None):\n if not rules: rules = RuleCollection()\n buf = \"\"\n for line in open(file,'r'):\n if not line[0]=='#':\n buf += line\n try:\n for (ptree,lo,hi) in ruleNT.scanString(buf):\n rules.add(Parser._convertRule(ptree))\n return rules\n except KeyError:\n print 'error near ',lo,'in',file\n return rules", "def get_rules(paths):\n raw_rules = []\n for path in paths:\n with open(path, \"r\", encoding=\"utf8\") as f:\n raw_rules += f.read().splitlines()\n \n return AdblockRules(raw_rules)", "def list_role_inference_rules(self):\n raise exception.NotImplemented() # pragma: no cover", "def get_rules(paths):\n raw_rules = []\n for path in paths:\n with open(path, \"r\", encoding=\"utf8\") as f:\n raw_rules += f.read().splitlines()\n \n return AdblockRules(raw_rules)", "def check_custom_rules(self):\n if self.custom_rules:\n with redirect_stdout(PrintLogger(name=\"pylint\", log_level=\"INFO\")):\n passed_custom, override = self.custom_rules(self.results.linter.stats, self.fname)\n if not passed_custom:\n self.logging.warning(\"{} FAILED CUSTOM CHECKS\".format(self.fname))\n self.custom_failed.append(self.fname)\n return passed_custom, override\n return False, False", "def readrules(self, fomalines):\n for lineno, l in enumerate(fomalines):\n if 'define' in l or 'def ' in l:\n rulecom = l.split(' #')\n r = re.findall(\"(defi?n?e?)\\s+(\\S+)\\s+([^;]+)\", rulecom[0])\n if len(r[0]) != 3:\n print \"Syntax error on line %i\" % lineno\n (_, rulename, rule) = r[0]\n if len(rulecom) > 1:\n commentline = rulecom[1].strip()\n 
else:\n commentline = ''\n self.rule_add(rulename, rule, commentline)\n if 'chain' in l:\n l = l.replace(';','')\n chain = re.findall('chain\\s+(.*)', l)\n rc = chain[0].replace(' ','').split(',')\n self.rc = rc", "def GetZeroQueryRules(input_file_name):\n rules = []\n with open(input_file_name, 'r') as input_file:\n for line in input_file:\n if line.startswith('#'):\n continue\n line = line.rstrip('\\r\\n')\n if not line:\n continue\n\n tokens = line.split('\\t')\n key = tokens[0]\n values = tokens[1].split(',')\n\n rules.append((key, values))\n rules.sort(lambda x, y: cmp(x[0], y[0])) # For binary search\n return rules", "def license_rules(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"license_rules\")", "def ip_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ip_rules\")", "def apply_rules(term: Term, rules):\n return functools.reduce(apply_rule, rules, term)", "def get_rules_for_type(type):\n\n rules = get_db().execute('SELECT * FROM ruleset WHERE type=?', (type,)).fetchall()\n\n return rules" ]
[ "0.76105815", "0.7469598", "0.73660934", "0.7312732", "0.721728", "0.71480995", "0.70150936", "0.68892413", "0.68892413", "0.67432487", "0.66999936", "0.6694647", "0.6694647", "0.669131", "0.6645618", "0.6565479", "0.65333295", "0.6530434", "0.65208477", "0.651483", "0.65071875", "0.64957", "0.64917344", "0.6485106", "0.64597285", "0.645632", "0.6406928", "0.63938135", "0.6371905", "0.63318115", "0.6326222", "0.6316648", "0.6273836", "0.6265625", "0.6256686", "0.62564653", "0.62508076", "0.62500155", "0.6195834", "0.6165039", "0.61278063", "0.6122602", "0.61147696", "0.61017627", "0.60959786", "0.60942304", "0.6069379", "0.6068919", "0.60648304", "0.6062324", "0.6062047", "0.6041647", "0.6026492", "0.6014971", "0.6011419", "0.6010809", "0.6007865", "0.5986201", "0.59726673", "0.5965942", "0.59536374", "0.594658", "0.593446", "0.5930544", "0.5926714", "0.5888764", "0.5886819", "0.58844143", "0.58739257", "0.58707935", "0.58537644", "0.5829875", "0.5828193", "0.58248174", "0.58021975", "0.57991034", "0.5760543", "0.5746575", "0.5735376", "0.5733241", "0.57327616", "0.57306993", "0.5726072", "0.57221556", "0.57197666", "0.5710229", "0.57086056", "0.5704269", "0.56987405", "0.568455", "0.56782544", "0.56663966", "0.56466436", "0.56395495", "0.5600726", "0.55900496", "0.5580071", "0.5573578", "0.5553044", "0.5550792" ]
0.59890103
57
Return rules for checking.
def rules(cls): rules_CityscapesTrainConfig = {"batch_size": {"type": int}, "list_path": {"type": str} } return rules_CityscapesTrainConfig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rules(cls):\n raise NotImplementedError()", "def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules", "def get_rules(self):\n rules = []\n for item in self.rule:\n rules.append(item)\n return rules", "def get_rules(self):\n # TODO: Implement\n self.traverse2(self.tree, [])\n return self.rules", "def rules(self):\n return tuple(e for e in self.entries if e.is_rule)", "def get_rules(cls) -> list:\n return [factory() for factory in cls._rules_factories]", "def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def rules(cls):\n rules_Cityscapes = {\"common\": {\"type\": dict},\n \"train\": {\"type\": dict},\n \"val\": {\"type\": dict},\n \"test\": {\"type\": dict}\n }\n return rules_Cityscapes", "def getListOfRules(self):\n return self.model.getListOfRules()", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperationsPatch']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperationsPatch']]:\n return pulumi.get(self, \"rules\")", "def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())", "def rule_conditions(self) -> pulumi.Output[Sequence['outputs.RuleRuleCondition']]:\n return pulumi.get(self, \"rule_conditions\")", "def filter_rules(self) -> list:\n return self.transform(self._tree), self._rules", "def hrules(self):\n ...", "def rule_conditions(self) -> pulumi.Input[Sequence[pulumi.Input['RuleRuleConditionArgs']]]:\n return pulumi.get(self, \"rule_conditions\")", "def rule_conditions(self) -> Sequence['outputs.GetRulesRuleRuleConditionResult']:\n return pulumi.get(self, \"rule_conditions\")", "def rules(self) -> List['outputs.PreventionInspectTemplateInspectConfigRuleSetRule']:\n return pulumi.get(self, \"rules\")", "def get_rules(self):\n return [phi for psi in self._Psi for phi in psi]", "def rules(self) -> FrozenOrderedSet[Union[Callable, Rule]]:\n return self._rules", "def test_rules():", "def getListOfRules(self, *args):\n return _libsbml.Model_getListOfRules(self, *args)", "def check_rules(self):\n conditions = ('element', 'name', 'type')\n metric = set(['type', 'value'])\n \n elements = {}\n for rule in self.rules:\n if not 
isinstance(rule, dict):\n self.fail(msg='Rule format is not type dict: %s, type: %s'\n % (rule, type(rule)))\n if 'name' not in rule:\n self.fail(msg='Name is a required field for all rules')\n if 'match_condition' in rule:\n if not isinstance(rule['match_condition'], list):\n self.fail(msg='Match condition is expected to be a list')\n\n for match in rule['match_condition']:\n if match.get('type') == 'metric':\n if set(match.keys()) ^ metric:\n self.fail(msg='Metric definition can only have '\n 'values: %s, given: %s' % (list(metric), match))\n continue\n # Validate fields in condition\n for field in conditions:\n if field not in match:\n self.fail(msg='Match condition is missing a required '\n 'key: %r ,required: %s' % (match, list(conditions)))\n if field == 'element' and match.get(field) not in access_list:\n self.fail(msg='Match condition element is not valid: %s, '\n 'valid types: %s' % (match.get(field), list(access_list)))\n elif field == 'type' and match[field] not in match_conditions:\n self.fail(msg='Match condition type is not valid: %s, '\n 'valid types: %s' % (match[field], list(match_conditions)))\n \n element = match.get('element')\n # peer_address can only be type engine or external_bgp_peer\n if match['type'] == 'peer_address' and element not \\\n in ('engine', 'external_bgp_peer'):\n self.fail(msg='A peer address element can only be of type '\n 'engine or external_bgp_peer, provided definition: %s' % match)\n elif match['type'] == 'next_hop' and ('prefix_list' not in \\\n element and 'access_list' not in element):\n self.fail(msg='A next hop definition must be either an access '\n 'list or prefix list type, provided defintion: %s' % match)\n \n if 'engine' in element:\n element = 'single_fw,fw_cluster,virtual_fw'\n elements.setdefault(\n element, set([])).add(match.get('name'))\n \n return [elements] if elements else []", "def effective_rules(self) -> pulumi.Output[Sequence[Any]]:\n return pulumi.get(self, \"effective_rules\")", "def vrules(self):\n ...", "def rules(self):\n return self._alert_rules_client", "def extract_rules(self, labels=None):\n # Extract flat list of rules in array form\n if isinstance(self.base_estimator, RandomForestClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_]))\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules(__dt) for dt in self.base_estimator.estimators_.ravel()]))\n elif isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for dt in self._rule_dump]))\n \n # Convert each sub-rule into text, join together with '&' and then add to rules\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=labels, scaler=self.ext_scaler)) for r in rules])\n \n return self.rules", "def checks(self):\r\n return checks.Checks(self)", "def create_url_rules(self):\n return []", "def game_rules(self):\n self.valid_answers = dict()\n\n yield ScoreBoard(win=0, lose=0, tie=0)\n yield Results(winner='rock👊', loser='scissors✌', why='Rock👊 smashes scissors✌')\n yield Results(winner='paper👋', loser='rock👊', why='Paper👋 covers rock👊')\n yield Results(winner='scissors✌', loser='paper👋', why='Scissors✌ cut paper👋')\n yield ValidAnswer(answer='rock👊', key='r')\n yield ValidAnswer(answer='paper👋', key='p')\n yield ValidAnswer(answer='scissors✌', key='s')", "def items(self):\n return self._rules_by_lhs.items()", "def conditions(self):\n return 
self._separated_constructs(RuleCondition)", "def rules(self, transfer, robot_settings, dilution_settings):\n return []", "def getAllDecisionRules(self):\n\n #check this shit lol?\n thetas = self.getAllTheta()\n human_actions = self.getAllHumanActions()\n return [list(zip(thetas, item)) for item in itertools.product(human_actions, repeat=len(thetas))]", "def get_conditional_rules(self):\n conditional_rules = []\n\n for field in self.form.get_prep_value():\n\n rules = field['value'].get('rules', None)\n if rules:\n field_id = field['value'].get('field_id', None)\n if field_id:\n rules['field_name'] = field_id\n else:\n rules['field_name'] = clean_form_field_name(field['value']['label'])\n rules['required'] = field['value'].get('required', False)\n rules['field_type'] = field.get('type', None)\n conditions = rules.get('conditions', None)\n if len(conditions):\n for condition in conditions:\n del(condition['id'])\n del(condition['type'])\n condition['field_name'] = clean_form_field_name(condition['value']['field_name'])\n condition['rule'] = condition['value']['rule']\n condition['value'] = condition['value'].get('value', None)\n\n conditional_rules.append(rules)\n\n return conditional_rules", "def get_rules(self, M):\n return list(itertools.chain.from_iterable(\n list(itertools.chain.from_iterable(\n [[self.get_boxrules(x, M), self.get_unaryrules(x, M),\n self.at_least_one_rules(x, M), self.get_columnrules(x, M),\n self.get_rowrules(x, M)] for x in itertools.product(range(1, M+1),\n range(1, M+1))]\n ))\n ))", "def rules(self) -> pulumi.Input[Sequence[pulumi.Input['BucketLifecycleConfigurationV2RuleArgs']]]:\n return pulumi.get(self, \"rules\")", "def getRules(self):\n self._rules = {}\n _RuleSet = self._sets[self._currentSet - 1 ]\n for oneSet in _RuleSet :\n \n if len(oneSet) < 2 : \n pass \n \n for x in range(1, max(floor(len(oneSet) / 2),2) ):\n \n comb = combinations(oneSet, x)\n for item in comb:\n remaining = tuple(x for x in oneSet if x not in item)\n self._rules[(item,remaining)] = 0\n self._rules[(remaining,item)] = 0", "def extract_rule_names(self):\n if self.scanner == YARA:\n return sorted({result['rule'] for result in self.results})\n if self.scanner == CUSTOMS and 'matchedRules' in self.results:\n return self.results['matchedRules']\n # We do not have support for the remaining scanners (yet).\n return []", "def compliances(self) -> Sequence['outputs.GetRulesRuleComplianceResult']:\n return pulumi.get(self, \"compliances\")", "def get_rules(app):\n rules = [\n Rule('/', endpoint='home', handler='apps.busstopped.handlers.MainPage'),\n Rule('/ajax/busstopped/<line>/<direction>', endpoint='ajax-busstopped', handler='apps.busstopped.handlers.AjaxGetBusStopped'),\n Rule('/ajax/point', endpoint='ajax-point', handler='apps.busstopped.handlers.AjaxGetBusStopTimes'),\n Rule('/ajax/getbuspaths', endpoint='ajax-getbuspath', handler='apps.busstopped.handlers.AjaxGetBusPath'),\n Rule('/faq', endpoint='faq', handler='apps.busstopped.handlers.FAQPage'),\n Rule('/changelog', endpoint='change-log', handler='apps.busstopped.handlers.ChangeLogPage'),\n Rule('/info', endpoint='info', handler='apps.busstopped.handlers.InfoPage'),\n Rule('/addpoint', endpoint='add_point', handler='apps.busstopped.handlers.AddPointDocPage'),\n Rule('/news', endpoint='news', handler='apps.busstopped.handlers.NewsPage'),\n Rule('/parse', endpoint='parse', handler='apps.busstopped.handlers.ParseTimesPage'),\n ]\n\n return rules", "def get(self, *args):\n return _libsbml.ListOfRules_get(self, *args)", "def 
test_list_rules(self):\n pass", "def rules(cls):\n rules_CityscapesValConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesValConfig", "def get_all_rules(self):\n\n rules = set()\n for a_dict in self.get_dicts():\n rules = keywords.union(a_dict['rules'])\n return sorted(keywords)", "def _apply_commit_rules(rules, commit):\n all_violations = []\n for rule in rules:\n violations = rule.validate(commit)\n if violations:\n all_violations.extend(violations)\n return all_violations", "def rules(self) -> pulumi.Output[Sequence['outputs.BucketLifecycleConfigurationV2Rule']]:\n return pulumi.get(self, \"rules\")", "def get_rules():\n rules = []\n\n for app_module in get_config('tipfy', 'apps_installed'):\n try:\n # Load the urls module from the app and extend our rules.\n app_rules = import_string('%s.urls' % app_module)\n rules.extend(app_rules.get_rules())\n except ImportError:\n pass\n\n return rules", "def test_a1_check_rules(self):\n # Has rule\n rule = logic.check_rules(1, 1)\n self.assertEqual(rule, 1)\n rule = logic.check_rules(1, 2)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(1, 4)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(0, 3)\n self.assertEqual(rule, 4)\n rule = logic.check_rules(1, 8)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(1, 0)\n self.assertEqual(rule, 1)\n\n # No rule match\n rule = logic.check_rules(0, 1)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(0, 0)\n self.assertEqual(rule, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, 9)", "def get_rule_names(self):\n return self.rules.keys()", "def apply_rules(self):\n if len(self.rules) == 0:\n return\n for gene in self.population:\n for rule in self.rules:\n if rule.type == \"gene\":\n rule.check_and_apply(gene)", "def getChecks(self):\r\n raise AbstractError\r\n return []", "def rules(cls):\n rules_CityscapesTestConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesTestConfig", "def policy_rules(self) -> Sequence[Any]:\n return pulumi.get(self, \"policy_rules\")", "def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketLifecycleConfigurationV2RuleArgs']]]]:\n return pulumi.get(self, \"rules\")", "def rules(cls):\n rules_CityscapesConfig = {\"batch_size\": {\"type\": int},\n \"root_path\": {\"type\": str},\n \"num_parallel_batches\": {\"type\": int},\n \"fixed_size\": {\"type\": bool}\n }\n return rules_CityscapesConfig", "def __init__(self, rules):\n self.rules = rules\n\n self._rhs_rules = defaultdict(list)\n self._rhs_unary_rules = defaultdict(list)\n\n self._nonterm = set(rule.lhs for rule in rules)\n self._term = set(token for rhs in chain(rule.rhs for rule in rules)\n for token in rhs if token not in self._nonterm)\n\n for rule in rules:\n _, rhs, _ = rule\n self._rhs_rules[rhs].append(rule)\n\n for rhs_rules in self._rhs_rules.values():\n rhs_rules.sort(key=lambda r: r.log_prob, reverse=True)\n\n self._is_cnf = all(len(rule.rhs) == 1\n or (len(rule.rhs) == 2\n and all(s in self._nonterm for s in rule.rhs))\n for rule in self.rules)", "def create_rules(self, grids):\n from ..models import Rule\n\n alts = [grid.get_alternative_total_rating_tuples() for grid in grids]\n combinations = itertools.product(*alts)\n rules = []\n for combi in combinations:\n alts, ratings = zip(*combi)\n rules.append(Rule(alts, sum(ratings)))\n\n rules.sort()\n\n return rules", "def 
make_all_rules(self):\n\n def compatible(pattern1, pattern2, direction):\n \"\"\"Returns `True` if `pattern2` is compatible with `pattern1` in the `direction`,\n otherwise return `False`.\"\"\"\n if direction == 0:\n return pattern1[:-1] == pattern2[1:]\n if direction == 2:\n return [line[:-1] for line in pattern1] == [line[1:] for line in pattern2]\n\n for index in range(len(self.patterns)):\n for ind in range(index + 1):\n for direction in (0, 2):\n if compatible(self.patterns[index], self.patterns[ind], direction):\n self.rules[index][direction].add(ind)\n self.rules[ind][direction + 1].add(index)", "def rules(self):\r\n return Acls(self)", "def rule_conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleRuleConditionArgs']]]]:\n return pulumi.get(self, \"rule_conditions\")", "def _compile_rules(self):\n for state, table in self.RULES.items():\n patterns = []\n actions = []\n nextstates = []\n for i, row in enumerate(table):\n if len(row) == 2:\n pattern, action_ = row\n nextstate = None\n elif len(row) == 3:\n pattern, action_, nextstate = row\n else:\n fstr = \"invalid RULES: state {}, row {}\"\n raise CompileError(fstr.format(state, i))\n patterns.append(pattern)\n actions.append(action_)\n nextstates.append(nextstate)\n reobj = re.compile(\"|\".join(\"(\" + p + \")\" for p in patterns))\n self._rules[state] = (reobj, actions, nextstates)", "def get_real_rules():\n real = {}\n\n for name, rule in RULES.items():\n q = GraphMetric.select(GraphMetric.metric).where(\n GraphMetric.metric % name).group_by(GraphMetric.metric)\n\n for i in q:\n real[i.metric] = rule\n return real", "def eqv_path_rules(self) -> List[Tuple[CombinatorialClassType, Rule]]:\n eqv_path_rules = []\n curr = self.comb_class\n for rule in self.rules:\n eqv_path_rules.append((curr, rule))\n curr = rule.children[0]\n return eqv_path_rules", "def generate_rules(self):\n for rule in self._parser.conditionals:\n\n all_in_facts, matrix = self._generate_rules(rule)\n if all_in_facts is True:\n self.new_fact_from_facts(rule)\n else:\n facts = self._parser.conditionals[rule][1]\n #print(rule, facts, matrix)", "def find_matching_rules(self, requested_uri):\n rulesfound = []\n for rule in RewriteRule.objects.filter(register=self) :\n (matched,nestedrule) = rule.match_inheritance( requested_uri)\n if matched :\n rulechain = (rule,) + nestedrule.get_subrules() + (nestedrule,)\n rulesfound.append(rulechain)\n # get all sub rules not bound to a register \n return rulesfound", "def config_rules(self) -> Sequence['outputs.GetCompliancePacksPackConfigRuleResult']:\n return pulumi.get(self, \"config_rules\")", "def rule_list():\r\n #check RIGHT and DOWN borders\r\n all_blocks = get_blocks(-1)\r\n allowed = {}\r\n for i in range(len(all_blocks)): #index\r\n for j in range(len(all_blocks)):\r\n #check RIGHT border\r\n allowed[(i,j)] = [False,False]\r\n if all_blocks[i][1][2] == all_blocks[j][1][0]:\r\n allowed[(i,j)][0] = True\r\n #check DOWN border\r\n if all_blocks[i][2][1] == all_blocks[j][0][1]:\r\n allowed[(i,j)][1] = True\r\n return allowed", "def make_rules(UI):\n \n Conditionals = Conditional_Database(UI)\n location = UI.location\n \n \n Rules = []\n if location in ['Rio de Janeiro']:\n\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Br_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Br_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: 
Conditionals.Br_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Br_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Br_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Br_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Br_Rule7func(policy_input)))\n\n elif location in ['Indonesia']:\n #National\n Rules.append(SDlib.Rule('Implement Some Restrictions Nationwide', 1, \n func = lambda policy_input: Conditionals.In_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Nationwide', 2, \n func = lambda policy_input: Conditionals.In_Rule2func(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Nationwide', 3, \n func = lambda policy_input: Conditionals.In_Rule3func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Nationwide', 4, \n func = lambda policy_input: Conditionals.In_Rule4func(policy_input))) \n #Java\n Rules.append(SDlib.Rule('Implement Some Restrictions Java - Zonal', 5, \n func = lambda policy_input: Conditionals.In_Rule1func_j(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Java - Zonal', 6, \n func = lambda policy_input: Conditionals.In_Rule2func_j(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Java - Zonal', 7, \n func = lambda policy_input: Conditionals.In_Rule3func_j(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Java - Zonal', 8, \n func = lambda policy_input: Conditionals.In_Rule4func_j(policy_input))) \n\n #Sulawesi\n Rules.append(SDlib.Rule('Implement Some Restrictions Sulawesi - Zonal', 9, \n func = lambda policy_input: Conditionals.In_Rule1func_s(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Sulawesi - Zonal', 10, \n func = lambda policy_input: Conditionals.In_Rule2func_s(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Sulawesi - Zonal', 11, \n func = lambda policy_input: Conditionals.In_Rule3func_s(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Sulawesi - Zonal', 12, \n func = lambda policy_input: Conditionals.In_Rule4func_s(policy_input))) \n\n elif location in ['Chile']:\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Ch_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Ch_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Ch_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Ch_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Ch_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Ch_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Ch_Rule7func(policy_input)))\n \n elif location in ['Santiago']:\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: 
Conditionals.Sa_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Sa_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Sa_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Sa_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Sa_Rule5func(policy_input)))\n \n if location in ['Querétaro']:\n\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Br_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Br_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Br_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Br_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Br_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Br_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Br_Rule7func(policy_input)))\n\n return Rules", "def test_all_rules_are_tested():\n tested_rules = defaultdict(list)\n for test in TESTS:\n cls = test[0]\n rule = test[1]\n tested_rules[cls].append(rule)\n for cls in tested_rules.keys():\n if hasattr(cls, '_binary_rules'):\n rules = set(cls._binary_rules.keys())\n elif hasattr(cls, '_rules'):\n rules = set(cls._rules.keys())\n assert set(tested_rules[cls]) == rules", "def match_rules(rules, wm):\n res = []\n for r in rules:\n new_patterns = match_rule(r[0],r[1],r[2], wm)\n if new_patterns:\n print(\" Match succeeds\")\n print(\" Adding assertions to WM\")\n else:\n print(\" Match fails\")\n for n in new_patterns:\n if (n not in wm) and (n not in res):\n print(\" \",n)\n res.append(n)\n # print(\"new patterns so far = \", res)\n # print()\n # for testing\n # break\n return res", "def evaluate(self, request_info: RequestInfo):\n rule_results = [(rule, rule.matches(request_info)) for rule in self.rules]\n\n overriding_blocking_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.DENY and result == MatchResult.OVERRIDE\n ]\n overriding_allowing_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.ALLOW and result == MatchResult.OVERRIDE\n ]\n\n if len(overriding_allowing_rules) > 0:\n return Action.ALLOW, overriding_allowing_rules\n\n if len(overriding_blocking_rules) > 0:\n return Action.DENY, overriding_blocking_rules\n\n blocking_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.DENY and result == MatchResult.MATCH\n ]\n allowing_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.ALLOW and result == MatchResult.MATCH\n ]\n\n if len(allowing_rules) > 0:\n return Action.ALLOW, allowing_rules\n\n if len(blocking_rules) > 0:\n return Action.DENY, blocking_rules\n\n return Action.NOOP, None", "def get_searchable_rules(rules):\n searchable_rules = {rule.variable: {} for rule in rules}\n for rule in rules:\n searchable_rules[rule.variable][tuple(rule.derivation)] = rule\n return 
searchable_rules", "def get_rules_for_action(self, action_type: ActionType) -> List[\"Rule\"]:\n return [rule for rule in self.rules if rule.action_type == action_type]", "def get_rules(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetRulesV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetRulesV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "def getRule(self, *args):\n return _libsbml.Model_getRule(self, *args)", "def rules_for_request(request):\n surt = request.GET.get('surt')\n warc = request.GET.get('warc')\n capture_date = int(request.GET.get('capture-date')) # XXX try/except\n if surt is None or warc is None or capture_date is None:\n return error('surt, warc, and capture-date query string params'\n ' are all required', {})\n surt = Surt(surt)\n tree = tree_for_surt(surt)\n warc_parts = warc.split('-') # XXX validate warc name\n warc_date = int(warc_parts[4][0:-5]) # Parse an int out of the date minus ms\n applicable_rules = []\n for rule in tree:\n start = int(rule.capture_start.strftime('%Y%m%d%H%M'))\n end = int(rule.capture_end.strftime('%Y%m%d%H%M'))\n if ((warc_date > start and warc_date < end) and\n (capture_date > start and capture_date < end)):\n applicable_rules.append(rule)\n # Here is where we would make a surface-level decision on the action to\n # be taken (block, auth, allow, rewrite, etc). A point of optimization would\n # be to use django to only select the rules matching the date range, but for\n # now, we select the whole tree. Also, date comparisons would probably be\n # faster than coercing to strings, then to ints, but I was running short\n # on time.\n return success([rule.summary() for rule in applicable_rules])", "def getSpecRules(self, rhs):\n if rhs not in self.itemSet:\n print('Please input a term contain in the term-set !')\n return None\n \n rules = dict()\n for key, value in self.freqSet.items():\n for item in value:\n if rhs.issubset(item) and len(item) > 1:\n item_supp = self.getSupport(item)\n item = item.difference(rhs)\n conf = item_supp / self.getSupport(item)\n if conf >= self.minConf:\n rules[item] = conf\n return rules", "def rules():\r\n print(\"\\n\\\r\n \\tMmmmm... Si vous etes ici, j'imagine que votre décision est prise !\\n\\\r\n Je ne veux pas vous inciter à arreter mais vous etes conscient des risques ?\\n\\\r\n Vous participez quand meme au plus grand combat de tous les temps...\\n\\n\\\r\n \\tBon je vous explique les règles : Vous allez affronter un autre titan en duel\\n\\\r\n Vous avez chacun des tentatives d'infliger une attaque. Si elles réussissent,\\n\\\r\n Vous aurez le choix d'infliger entre 0 et 100 dégats. 
Enfin non puisque tout\\n\\\r\n sera tiré au sort :)\\n\\\r\n La partie s'arrete quand l'un des deux titans est mort\\n\\\r\n \\tBonne chance a vous!\\n\")", "def _get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def validate(self):\n self._validate_rule(self.rules, allow_self_reference=self.allow_self_reference)\n self._validate_operator_arguments(self.rules)\n return self.errors", "def custom_rules(self) -> pulumi.Output[Optional[Sequence['outputs.FirewallPolicyCustomRule']]]:\n return pulumi.get(self, \"custom_rules\")", "def compile_rule(self, cmd):\n cfg, rules = self.config, self.rules\n if cmd == None:\n return Rule()\n if isinstance(cmd, dict):\n r = []\n if 'as' in cmd:\n r += [self.compile_rule(cmd['as'])]\n if 'find' in cmd:\n r += [FindRule(cmd['find'], self)]\n if 'split' in cmd:\n c = cmd['split']\n if 'by' not in c:\n raise ConfigError('\"split.by\" is not defined!')\n if 'as' not in c:\n raise ConfigError('\"split.as\" is not defined!')\n return SplitRule(c['by'], self.compile_rule(c['as']), self)\n if 'count' in cmd:\n r += [CountRule(cmd['count'], self)]\n if 'group' in cmd:\n r += [GroupRule(cmd['group'], self)]\n if len(r) == 0:\n return Rule()\n return AndRule(r) if len(r) > 1 else r[0]\n if isinstance(cmd, list):\n return AndRule([self.compile_rule(c) for c in cmd])\n if cmd[0] == '?':\n return FindRule(cmd[1:], self)\n if cmd[0] == '$':\n #reference\n key = cmd[1:]\n if key in rules:\n return rules[key]\n if key not in cfg:\n raise ConfigError('Reference \"%s\" not defined!' 
% cmd)\n if key in self.ref:\n raise ConfigError('Recursively reference to key \"%s\"' % key)\n self.ref.add(key)\n rules[key] = self.compile_rule(cfg[key])\n return rules[key]\n return AsRule(cmd, self)", "def _get_all_checks(self):\n this_class = self.__class__\n\n check_list = [\n getattr(self, func)\n for func in dir(self.__class__)\n if callable(getattr(this_class, func))\n and func.startswith(self.check_prefix)\n ]\n\n return check_list", "def get_rules(self, obj, current_path=[]):\n # If node isn't a rule or dictionary\n if type(obj) != dict:\n return []\n\n # If node is a rule return its location and its details\n if self.is_rule(obj):\n return [([self.field] + current_path, obj)]\n\n rules = []\n for path, val in obj.items():\n rules = rules + self.get_rules(val, current_path + [path])\n return rules", "def get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n version = version.replace(\"+incompatible\", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def rules():\n print \"Loading Rules\"\n return render_template(\"rules.html\")", "def parseFile(file,rules = None):\n if not rules: rules = RuleCollection()\n buf = \"\"\n for line in open(file,'r'):\n if not line[0]=='#':\n buf += line\n try:\n for (ptree,lo,hi) in ruleNT.scanString(buf):\n rules.add(Parser._convertRule(ptree))\n return rules\n except KeyError:\n print 'error near ',lo,'in',file\n return rules", "def get_rules(paths):\n raw_rules = []\n for path in paths:\n with open(path, \"r\", encoding=\"utf8\") as f:\n raw_rules += f.read().splitlines()\n \n return AdblockRules(raw_rules)", "def list_role_inference_rules(self):\n raise exception.NotImplemented() # pragma: no cover", "def get_rules(paths):\n raw_rules = []\n for path in paths:\n with open(path, \"r\", encoding=\"utf8\") as f:\n raw_rules += f.read().splitlines()\n \n return AdblockRules(raw_rules)", "def check_custom_rules(self):\n if self.custom_rules:\n with redirect_stdout(PrintLogger(name=\"pylint\", log_level=\"INFO\")):\n passed_custom, override = self.custom_rules(self.results.linter.stats, self.fname)\n if not passed_custom:\n self.logging.warning(\"{} FAILED CUSTOM CHECKS\".format(self.fname))\n self.custom_failed.append(self.fname)\n return passed_custom, override\n return False, False", "def readrules(self, fomalines):\n for lineno, l in enumerate(fomalines):\n if 'define' in l or 'def ' in l:\n rulecom = l.split(' #')\n r = re.findall(\"(defi?n?e?)\\s+(\\S+)\\s+([^;]+)\", rulecom[0])\n if len(r[0]) != 3:\n print \"Syntax error on line %i\" % lineno\n (_, rulename, rule) = r[0]\n if len(rulecom) > 1:\n commentline = rulecom[1].strip()\n 
else:\n commentline = ''\n self.rule_add(rulename, rule, commentline)\n if 'chain' in l:\n l = l.replace(';','')\n chain = re.findall('chain\\s+(.*)', l)\n rc = chain[0].replace(' ','').split(',')\n self.rc = rc", "def GetZeroQueryRules(input_file_name):\n rules = []\n with open(input_file_name, 'r') as input_file:\n for line in input_file:\n if line.startswith('#'):\n continue\n line = line.rstrip('\\r\\n')\n if not line:\n continue\n\n tokens = line.split('\\t')\n key = tokens[0]\n values = tokens[1].split(',')\n\n rules.append((key, values))\n rules.sort(lambda x, y: cmp(x[0], y[0])) # For binary search\n return rules", "def license_rules(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"license_rules\")", "def ip_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ip_rules\")", "def apply_rules(term: Term, rules):\n return functools.reduce(apply_rule, rules, term)", "def get_rules_for_type(type):\n\n rules = get_db().execute('SELECT * FROM ruleset WHERE type=?', (type,)).fetchall()\n\n return rules" ]
[ "0.76105815", "0.7469598", "0.73660934", "0.7312732", "0.721728", "0.71480995", "0.70150936", "0.68892413", "0.68892413", "0.67432487", "0.66999936", "0.6694647", "0.6694647", "0.669131", "0.6645618", "0.6565479", "0.65333295", "0.6530434", "0.65208477", "0.651483", "0.65071875", "0.64957", "0.64917344", "0.6485106", "0.64597285", "0.645632", "0.6406928", "0.63938135", "0.6371905", "0.63318115", "0.6326222", "0.6316648", "0.6273836", "0.6265625", "0.6256686", "0.62564653", "0.62508076", "0.62500155", "0.6195834", "0.6165039", "0.61278063", "0.6122602", "0.61147696", "0.61017627", "0.60959786", "0.60942304", "0.6069379", "0.6068919", "0.60648304", "0.6062324", "0.6062047", "0.6041647", "0.6026492", "0.6014971", "0.6011419", "0.6010809", "0.6007865", "0.59890103", "0.5986201", "0.59726673", "0.5965942", "0.594658", "0.593446", "0.5930544", "0.5926714", "0.5888764", "0.5886819", "0.58844143", "0.58739257", "0.58707935", "0.58537644", "0.5829875", "0.5828193", "0.58248174", "0.58021975", "0.57991034", "0.5760543", "0.5746575", "0.5735376", "0.5733241", "0.57327616", "0.57306993", "0.5726072", "0.57221556", "0.57197666", "0.5710229", "0.57086056", "0.5704269", "0.56987405", "0.568455", "0.56782544", "0.56663966", "0.56466436", "0.56395495", "0.5600726", "0.55900496", "0.5580071", "0.5573578", "0.5553044", "0.5550792" ]
0.59536374
61
Return rules for checking.
def rules(cls):
    rules_CityscapesValConfig = {"batch_size": {"type": int},
                                 "list_path": {"type": str}
                                 }
    return rules_CityscapesValConfig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rules(cls):\n raise NotImplementedError()", "def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules", "def get_rules(self):\n rules = []\n for item in self.rule:\n rules.append(item)\n return rules", "def get_rules(self):\n # TODO: Implement\n self.traverse2(self.tree, [])\n return self.rules", "def rules(self):\n return tuple(e for e in self.entries if e.is_rule)", "def get_rules(cls) -> list:\n return [factory() for factory in cls._rules_factories]", "def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def rules(cls):\n rules_Cityscapes = {\"common\": {\"type\": dict},\n \"train\": {\"type\": dict},\n \"val\": {\"type\": dict},\n \"test\": {\"type\": dict}\n }\n return rules_Cityscapes", "def getListOfRules(self):\n return self.model.getListOfRules()", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperationsPatch']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperationsPatch']]:\n return pulumi.get(self, \"rules\")", "def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())", "def rule_conditions(self) -> pulumi.Output[Sequence['outputs.RuleRuleCondition']]:\n return pulumi.get(self, \"rule_conditions\")", "def filter_rules(self) -> list:\n return self.transform(self._tree), self._rules", "def hrules(self):\n ...", "def rule_conditions(self) -> pulumi.Input[Sequence[pulumi.Input['RuleRuleConditionArgs']]]:\n return pulumi.get(self, \"rule_conditions\")", "def rule_conditions(self) -> Sequence['outputs.GetRulesRuleRuleConditionResult']:\n return pulumi.get(self, \"rule_conditions\")", "def rules(self) -> List['outputs.PreventionInspectTemplateInspectConfigRuleSetRule']:\n return pulumi.get(self, \"rules\")", "def get_rules(self):\n return [phi for psi in self._Psi for phi in psi]", "def rules(self) -> FrozenOrderedSet[Union[Callable, Rule]]:\n return self._rules", "def test_rules():", "def getListOfRules(self, *args):\n return _libsbml.Model_getListOfRules(self, *args)", "def check_rules(self):\n conditions = ('element', 'name', 'type')\n metric = set(['type', 'value'])\n \n elements = {}\n for rule in self.rules:\n if not 
isinstance(rule, dict):\n self.fail(msg='Rule format is not type dict: %s, type: %s'\n % (rule, type(rule)))\n if 'name' not in rule:\n self.fail(msg='Name is a required field for all rules')\n if 'match_condition' in rule:\n if not isinstance(rule['match_condition'], list):\n self.fail(msg='Match condition is expected to be a list')\n\n for match in rule['match_condition']:\n if match.get('type') == 'metric':\n if set(match.keys()) ^ metric:\n self.fail(msg='Metric definition can only have '\n 'values: %s, given: %s' % (list(metric), match))\n continue\n # Validate fields in condition\n for field in conditions:\n if field not in match:\n self.fail(msg='Match condition is missing a required '\n 'key: %r ,required: %s' % (match, list(conditions)))\n if field == 'element' and match.get(field) not in access_list:\n self.fail(msg='Match condition element is not valid: %s, '\n 'valid types: %s' % (match.get(field), list(access_list)))\n elif field == 'type' and match[field] not in match_conditions:\n self.fail(msg='Match condition type is not valid: %s, '\n 'valid types: %s' % (match[field], list(match_conditions)))\n \n element = match.get('element')\n # peer_address can only be type engine or external_bgp_peer\n if match['type'] == 'peer_address' and element not \\\n in ('engine', 'external_bgp_peer'):\n self.fail(msg='A peer address element can only be of type '\n 'engine or external_bgp_peer, provided definition: %s' % match)\n elif match['type'] == 'next_hop' and ('prefix_list' not in \\\n element and 'access_list' not in element):\n self.fail(msg='A next hop definition must be either an access '\n 'list or prefix list type, provided defintion: %s' % match)\n \n if 'engine' in element:\n element = 'single_fw,fw_cluster,virtual_fw'\n elements.setdefault(\n element, set([])).add(match.get('name'))\n \n return [elements] if elements else []", "def effective_rules(self) -> pulumi.Output[Sequence[Any]]:\n return pulumi.get(self, \"effective_rules\")", "def vrules(self):\n ...", "def rules(self):\n return self._alert_rules_client", "def extract_rules(self, labels=None):\n # Extract flat list of rules in array form\n if isinstance(self.base_estimator, RandomForestClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_]))\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules(__dt) for dt in self.base_estimator.estimators_.ravel()]))\n elif isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for dt in self._rule_dump]))\n \n # Convert each sub-rule into text, join together with '&' and then add to rules\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=labels, scaler=self.ext_scaler)) for r in rules])\n \n return self.rules", "def checks(self):\r\n return checks.Checks(self)", "def create_url_rules(self):\n return []", "def game_rules(self):\n self.valid_answers = dict()\n\n yield ScoreBoard(win=0, lose=0, tie=0)\n yield Results(winner='rock👊', loser='scissors✌', why='Rock👊 smashes scissors✌')\n yield Results(winner='paper👋', loser='rock👊', why='Paper👋 covers rock👊')\n yield Results(winner='scissors✌', loser='paper👋', why='Scissors✌ cut paper👋')\n yield ValidAnswer(answer='rock👊', key='r')\n yield ValidAnswer(answer='paper👋', key='p')\n yield ValidAnswer(answer='scissors✌', key='s')", "def items(self):\n return self._rules_by_lhs.items()", "def conditions(self):\n return 
self._separated_constructs(RuleCondition)", "def rules(self, transfer, robot_settings, dilution_settings):\n return []", "def getAllDecisionRules(self):\n\n #check this shit lol?\n thetas = self.getAllTheta()\n human_actions = self.getAllHumanActions()\n return [list(zip(thetas, item)) for item in itertools.product(human_actions, repeat=len(thetas))]", "def get_conditional_rules(self):\n conditional_rules = []\n\n for field in self.form.get_prep_value():\n\n rules = field['value'].get('rules', None)\n if rules:\n field_id = field['value'].get('field_id', None)\n if field_id:\n rules['field_name'] = field_id\n else:\n rules['field_name'] = clean_form_field_name(field['value']['label'])\n rules['required'] = field['value'].get('required', False)\n rules['field_type'] = field.get('type', None)\n conditions = rules.get('conditions', None)\n if len(conditions):\n for condition in conditions:\n del(condition['id'])\n del(condition['type'])\n condition['field_name'] = clean_form_field_name(condition['value']['field_name'])\n condition['rule'] = condition['value']['rule']\n condition['value'] = condition['value'].get('value', None)\n\n conditional_rules.append(rules)\n\n return conditional_rules", "def get_rules(self, M):\n return list(itertools.chain.from_iterable(\n list(itertools.chain.from_iterable(\n [[self.get_boxrules(x, M), self.get_unaryrules(x, M),\n self.at_least_one_rules(x, M), self.get_columnrules(x, M),\n self.get_rowrules(x, M)] for x in itertools.product(range(1, M+1),\n range(1, M+1))]\n ))\n ))", "def rules(self) -> pulumi.Input[Sequence[pulumi.Input['BucketLifecycleConfigurationV2RuleArgs']]]:\n return pulumi.get(self, \"rules\")", "def getRules(self):\n self._rules = {}\n _RuleSet = self._sets[self._currentSet - 1 ]\n for oneSet in _RuleSet :\n \n if len(oneSet) < 2 : \n pass \n \n for x in range(1, max(floor(len(oneSet) / 2),2) ):\n \n comb = combinations(oneSet, x)\n for item in comb:\n remaining = tuple(x for x in oneSet if x not in item)\n self._rules[(item,remaining)] = 0\n self._rules[(remaining,item)] = 0", "def extract_rule_names(self):\n if self.scanner == YARA:\n return sorted({result['rule'] for result in self.results})\n if self.scanner == CUSTOMS and 'matchedRules' in self.results:\n return self.results['matchedRules']\n # We do not have support for the remaining scanners (yet).\n return []", "def compliances(self) -> Sequence['outputs.GetRulesRuleComplianceResult']:\n return pulumi.get(self, \"compliances\")", "def get_rules(app):\n rules = [\n Rule('/', endpoint='home', handler='apps.busstopped.handlers.MainPage'),\n Rule('/ajax/busstopped/<line>/<direction>', endpoint='ajax-busstopped', handler='apps.busstopped.handlers.AjaxGetBusStopped'),\n Rule('/ajax/point', endpoint='ajax-point', handler='apps.busstopped.handlers.AjaxGetBusStopTimes'),\n Rule('/ajax/getbuspaths', endpoint='ajax-getbuspath', handler='apps.busstopped.handlers.AjaxGetBusPath'),\n Rule('/faq', endpoint='faq', handler='apps.busstopped.handlers.FAQPage'),\n Rule('/changelog', endpoint='change-log', handler='apps.busstopped.handlers.ChangeLogPage'),\n Rule('/info', endpoint='info', handler='apps.busstopped.handlers.InfoPage'),\n Rule('/addpoint', endpoint='add_point', handler='apps.busstopped.handlers.AddPointDocPage'),\n Rule('/news', endpoint='news', handler='apps.busstopped.handlers.NewsPage'),\n Rule('/parse', endpoint='parse', handler='apps.busstopped.handlers.ParseTimesPage'),\n ]\n\n return rules", "def get(self, *args):\n return _libsbml.ListOfRules_get(self, *args)", "def 
test_list_rules(self):\n pass", "def get_all_rules(self):\n\n rules = set()\n for a_dict in self.get_dicts():\n rules = keywords.union(a_dict['rules'])\n return sorted(keywords)", "def _apply_commit_rules(rules, commit):\n all_violations = []\n for rule in rules:\n violations = rule.validate(commit)\n if violations:\n all_violations.extend(violations)\n return all_violations", "def rules(self) -> pulumi.Output[Sequence['outputs.BucketLifecycleConfigurationV2Rule']]:\n return pulumi.get(self, \"rules\")", "def get_rules():\n rules = []\n\n for app_module in get_config('tipfy', 'apps_installed'):\n try:\n # Load the urls module from the app and extend our rules.\n app_rules = import_string('%s.urls' % app_module)\n rules.extend(app_rules.get_rules())\n except ImportError:\n pass\n\n return rules", "def test_a1_check_rules(self):\n # Has rule\n rule = logic.check_rules(1, 1)\n self.assertEqual(rule, 1)\n rule = logic.check_rules(1, 2)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(1, 4)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(0, 3)\n self.assertEqual(rule, 4)\n rule = logic.check_rules(1, 8)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(1, 0)\n self.assertEqual(rule, 1)\n\n # No rule match\n rule = logic.check_rules(0, 1)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(0, 0)\n self.assertEqual(rule, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, 9)", "def get_rule_names(self):\n return self.rules.keys()", "def apply_rules(self):\n if len(self.rules) == 0:\n return\n for gene in self.population:\n for rule in self.rules:\n if rule.type == \"gene\":\n rule.check_and_apply(gene)", "def getChecks(self):\r\n raise AbstractError\r\n return []", "def rules(cls):\n rules_CityscapesTestConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesTestConfig", "def policy_rules(self) -> Sequence[Any]:\n return pulumi.get(self, \"policy_rules\")", "def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketLifecycleConfigurationV2RuleArgs']]]]:\n return pulumi.get(self, \"rules\")", "def rules(cls):\n rules_CityscapesConfig = {\"batch_size\": {\"type\": int},\n \"root_path\": {\"type\": str},\n \"num_parallel_batches\": {\"type\": int},\n \"fixed_size\": {\"type\": bool}\n }\n return rules_CityscapesConfig", "def __init__(self, rules):\n self.rules = rules\n\n self._rhs_rules = defaultdict(list)\n self._rhs_unary_rules = defaultdict(list)\n\n self._nonterm = set(rule.lhs for rule in rules)\n self._term = set(token for rhs in chain(rule.rhs for rule in rules)\n for token in rhs if token not in self._nonterm)\n\n for rule in rules:\n _, rhs, _ = rule\n self._rhs_rules[rhs].append(rule)\n\n for rhs_rules in self._rhs_rules.values():\n rhs_rules.sort(key=lambda r: r.log_prob, reverse=True)\n\n self._is_cnf = all(len(rule.rhs) == 1\n or (len(rule.rhs) == 2\n and all(s in self._nonterm for s in rule.rhs))\n for rule in self.rules)", "def create_rules(self, grids):\n from ..models import Rule\n\n alts = [grid.get_alternative_total_rating_tuples() for grid in grids]\n combinations = itertools.product(*alts)\n rules = []\n for combi in combinations:\n alts, ratings = zip(*combi)\n rules.append(Rule(alts, sum(ratings)))\n\n rules.sort()\n\n return rules", "def make_all_rules(self):\n\n def compatible(pattern1, pattern2, direction):\n \"\"\"Returns `True` if `pattern2` is compatible with `pattern1` in the `direction`,\n 
otherwise return `False`.\"\"\"\n if direction == 0:\n return pattern1[:-1] == pattern2[1:]\n if direction == 2:\n return [line[:-1] for line in pattern1] == [line[1:] for line in pattern2]\n\n for index in range(len(self.patterns)):\n for ind in range(index + 1):\n for direction in (0, 2):\n if compatible(self.patterns[index], self.patterns[ind], direction):\n self.rules[index][direction].add(ind)\n self.rules[ind][direction + 1].add(index)", "def rules(cls):\n rules_CityscapesTrainConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesTrainConfig", "def rules(self):\r\n return Acls(self)", "def rule_conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleRuleConditionArgs']]]]:\n return pulumi.get(self, \"rule_conditions\")", "def _compile_rules(self):\n for state, table in self.RULES.items():\n patterns = []\n actions = []\n nextstates = []\n for i, row in enumerate(table):\n if len(row) == 2:\n pattern, action_ = row\n nextstate = None\n elif len(row) == 3:\n pattern, action_, nextstate = row\n else:\n fstr = \"invalid RULES: state {}, row {}\"\n raise CompileError(fstr.format(state, i))\n patterns.append(pattern)\n actions.append(action_)\n nextstates.append(nextstate)\n reobj = re.compile(\"|\".join(\"(\" + p + \")\" for p in patterns))\n self._rules[state] = (reobj, actions, nextstates)", "def get_real_rules():\n real = {}\n\n for name, rule in RULES.items():\n q = GraphMetric.select(GraphMetric.metric).where(\n GraphMetric.metric % name).group_by(GraphMetric.metric)\n\n for i in q:\n real[i.metric] = rule\n return real", "def eqv_path_rules(self) -> List[Tuple[CombinatorialClassType, Rule]]:\n eqv_path_rules = []\n curr = self.comb_class\n for rule in self.rules:\n eqv_path_rules.append((curr, rule))\n curr = rule.children[0]\n return eqv_path_rules", "def generate_rules(self):\n for rule in self._parser.conditionals:\n\n all_in_facts, matrix = self._generate_rules(rule)\n if all_in_facts is True:\n self.new_fact_from_facts(rule)\n else:\n facts = self._parser.conditionals[rule][1]\n #print(rule, facts, matrix)", "def find_matching_rules(self, requested_uri):\n rulesfound = []\n for rule in RewriteRule.objects.filter(register=self) :\n (matched,nestedrule) = rule.match_inheritance( requested_uri)\n if matched :\n rulechain = (rule,) + nestedrule.get_subrules() + (nestedrule,)\n rulesfound.append(rulechain)\n # get all sub rules not bound to a register \n return rulesfound", "def config_rules(self) -> Sequence['outputs.GetCompliancePacksPackConfigRuleResult']:\n return pulumi.get(self, \"config_rules\")", "def rule_list():\r\n #check RIGHT and DOWN borders\r\n all_blocks = get_blocks(-1)\r\n allowed = {}\r\n for i in range(len(all_blocks)): #index\r\n for j in range(len(all_blocks)):\r\n #check RIGHT border\r\n allowed[(i,j)] = [False,False]\r\n if all_blocks[i][1][2] == all_blocks[j][1][0]:\r\n allowed[(i,j)][0] = True\r\n #check DOWN border\r\n if all_blocks[i][2][1] == all_blocks[j][0][1]:\r\n allowed[(i,j)][1] = True\r\n return allowed", "def make_rules(UI):\n \n Conditionals = Conditional_Database(UI)\n location = UI.location\n \n \n Rules = []\n if location in ['Rio de Janeiro']:\n\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Br_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Br_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: 
Conditionals.Br_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Br_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Br_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Br_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Br_Rule7func(policy_input)))\n\n elif location in ['Indonesia']:\n #National\n Rules.append(SDlib.Rule('Implement Some Restrictions Nationwide', 1, \n func = lambda policy_input: Conditionals.In_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Nationwide', 2, \n func = lambda policy_input: Conditionals.In_Rule2func(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Nationwide', 3, \n func = lambda policy_input: Conditionals.In_Rule3func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Nationwide', 4, \n func = lambda policy_input: Conditionals.In_Rule4func(policy_input))) \n #Java\n Rules.append(SDlib.Rule('Implement Some Restrictions Java - Zonal', 5, \n func = lambda policy_input: Conditionals.In_Rule1func_j(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Java - Zonal', 6, \n func = lambda policy_input: Conditionals.In_Rule2func_j(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Java - Zonal', 7, \n func = lambda policy_input: Conditionals.In_Rule3func_j(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Java - Zonal', 8, \n func = lambda policy_input: Conditionals.In_Rule4func_j(policy_input))) \n\n #Sulawesi\n Rules.append(SDlib.Rule('Implement Some Restrictions Sulawesi - Zonal', 9, \n func = lambda policy_input: Conditionals.In_Rule1func_s(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Sulawesi - Zonal', 10, \n func = lambda policy_input: Conditionals.In_Rule2func_s(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Sulawesi - Zonal', 11, \n func = lambda policy_input: Conditionals.In_Rule3func_s(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Sulawesi - Zonal', 12, \n func = lambda policy_input: Conditionals.In_Rule4func_s(policy_input))) \n\n elif location in ['Chile']:\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Ch_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Ch_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Ch_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Ch_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Ch_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Ch_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Ch_Rule7func(policy_input)))\n \n elif location in ['Santiago']:\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: 
Conditionals.Sa_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Sa_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Sa_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Sa_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Sa_Rule5func(policy_input)))\n \n if location in ['Querétaro']:\n\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Br_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Br_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Br_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Br_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Br_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Br_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Br_Rule7func(policy_input)))\n\n return Rules", "def test_all_rules_are_tested():\n tested_rules = defaultdict(list)\n for test in TESTS:\n cls = test[0]\n rule = test[1]\n tested_rules[cls].append(rule)\n for cls in tested_rules.keys():\n if hasattr(cls, '_binary_rules'):\n rules = set(cls._binary_rules.keys())\n elif hasattr(cls, '_rules'):\n rules = set(cls._rules.keys())\n assert set(tested_rules[cls]) == rules", "def match_rules(rules, wm):\n res = []\n for r in rules:\n new_patterns = match_rule(r[0],r[1],r[2], wm)\n if new_patterns:\n print(\" Match succeeds\")\n print(\" Adding assertions to WM\")\n else:\n print(\" Match fails\")\n for n in new_patterns:\n if (n not in wm) and (n not in res):\n print(\" \",n)\n res.append(n)\n # print(\"new patterns so far = \", res)\n # print()\n # for testing\n # break\n return res", "def evaluate(self, request_info: RequestInfo):\n rule_results = [(rule, rule.matches(request_info)) for rule in self.rules]\n\n overriding_blocking_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.DENY and result == MatchResult.OVERRIDE\n ]\n overriding_allowing_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.ALLOW and result == MatchResult.OVERRIDE\n ]\n\n if len(overriding_allowing_rules) > 0:\n return Action.ALLOW, overriding_allowing_rules\n\n if len(overriding_blocking_rules) > 0:\n return Action.DENY, overriding_blocking_rules\n\n blocking_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.DENY and result == MatchResult.MATCH\n ]\n allowing_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.ALLOW and result == MatchResult.MATCH\n ]\n\n if len(allowing_rules) > 0:\n return Action.ALLOW, allowing_rules\n\n if len(blocking_rules) > 0:\n return Action.DENY, blocking_rules\n\n return Action.NOOP, None", "def get_searchable_rules(rules):\n searchable_rules = {rule.variable: {} for rule in rules}\n for rule in rules:\n searchable_rules[rule.variable][tuple(rule.derivation)] = rule\n return 
searchable_rules", "def get_rules_for_action(self, action_type: ActionType) -> List[\"Rule\"]:\n return [rule for rule in self.rules if rule.action_type == action_type]", "def get_rules(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetRulesV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetRulesV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "def getRule(self, *args):\n return _libsbml.Model_getRule(self, *args)", "def rules_for_request(request):\n surt = request.GET.get('surt')\n warc = request.GET.get('warc')\n capture_date = int(request.GET.get('capture-date')) # XXX try/except\n if surt is None or warc is None or capture_date is None:\n return error('surt, warc, and capture-date query string params'\n ' are all required', {})\n surt = Surt(surt)\n tree = tree_for_surt(surt)\n warc_parts = warc.split('-') # XXX validate warc name\n warc_date = int(warc_parts[4][0:-5]) # Parse an int out of the date minus ms\n applicable_rules = []\n for rule in tree:\n start = int(rule.capture_start.strftime('%Y%m%d%H%M'))\n end = int(rule.capture_end.strftime('%Y%m%d%H%M'))\n if ((warc_date > start and warc_date < end) and\n (capture_date > start and capture_date < end)):\n applicable_rules.append(rule)\n # Here is where we would make a surface-level decision on the action to\n # be taken (block, auth, allow, rewrite, etc). A point of optimization would\n # be to use django to only select the rules matching the date range, but for\n # now, we select the whole tree. Also, date comparisons would probably be\n # faster than coercing to strings, then to ints, but I was running short\n # on time.\n return success([rule.summary() for rule in applicable_rules])", "def getSpecRules(self, rhs):\n if rhs not in self.itemSet:\n print('Please input a term contain in the term-set !')\n return None\n \n rules = dict()\n for key, value in self.freqSet.items():\n for item in value:\n if rhs.issubset(item) and len(item) > 1:\n item_supp = self.getSupport(item)\n item = item.difference(rhs)\n conf = item_supp / self.getSupport(item)\n if conf >= self.minConf:\n rules[item] = conf\n return rules", "def rules():\r\n print(\"\\n\\\r\n \\tMmmmm... Si vous etes ici, j'imagine que votre décision est prise !\\n\\\r\n Je ne veux pas vous inciter à arreter mais vous etes conscient des risques ?\\n\\\r\n Vous participez quand meme au plus grand combat de tous les temps...\\n\\n\\\r\n \\tBon je vous explique les règles : Vous allez affronter un autre titan en duel\\n\\\r\n Vous avez chacun des tentatives d'infliger une attaque. Si elles réussissent,\\n\\\r\n Vous aurez le choix d'infliger entre 0 et 100 dégats. 
Enfin non puisque tout\\n\\\r\n sera tiré au sort :)\\n\\\r\n La partie s'arrete quand l'un des deux titans est mort\\n\\\r\n \\tBonne chance a vous!\\n\")", "def _get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def validate(self):\n self._validate_rule(self.rules, allow_self_reference=self.allow_self_reference)\n self._validate_operator_arguments(self.rules)\n return self.errors", "def custom_rules(self) -> pulumi.Output[Optional[Sequence['outputs.FirewallPolicyCustomRule']]]:\n return pulumi.get(self, \"custom_rules\")", "def compile_rule(self, cmd):\n cfg, rules = self.config, self.rules\n if cmd == None:\n return Rule()\n if isinstance(cmd, dict):\n r = []\n if 'as' in cmd:\n r += [self.compile_rule(cmd['as'])]\n if 'find' in cmd:\n r += [FindRule(cmd['find'], self)]\n if 'split' in cmd:\n c = cmd['split']\n if 'by' not in c:\n raise ConfigError('\"split.by\" is not defined!')\n if 'as' not in c:\n raise ConfigError('\"split.as\" is not defined!')\n return SplitRule(c['by'], self.compile_rule(c['as']), self)\n if 'count' in cmd:\n r += [CountRule(cmd['count'], self)]\n if 'group' in cmd:\n r += [GroupRule(cmd['group'], self)]\n if len(r) == 0:\n return Rule()\n return AndRule(r) if len(r) > 1 else r[0]\n if isinstance(cmd, list):\n return AndRule([self.compile_rule(c) for c in cmd])\n if cmd[0] == '?':\n return FindRule(cmd[1:], self)\n if cmd[0] == '$':\n #reference\n key = cmd[1:]\n if key in rules:\n return rules[key]\n if key not in cfg:\n raise ConfigError('Reference \"%s\" not defined!' 
% cmd)\n if key in self.ref:\n raise ConfigError('Recursively reference to key \"%s\"' % key)\n self.ref.add(key)\n rules[key] = self.compile_rule(cfg[key])\n return rules[key]\n return AsRule(cmd, self)", "def _get_all_checks(self):\n this_class = self.__class__\n\n check_list = [\n getattr(self, func)\n for func in dir(self.__class__)\n if callable(getattr(this_class, func))\n and func.startswith(self.check_prefix)\n ]\n\n return check_list", "def get_rules(self, obj, current_path=[]):\n # If node isn't a rule or dictionary\n if type(obj) != dict:\n return []\n\n # If node is a rule return its location and its details\n if self.is_rule(obj):\n return [([self.field] + current_path, obj)]\n\n rules = []\n for path, val in obj.items():\n rules = rules + self.get_rules(val, current_path + [path])\n return rules", "def get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n version = version.replace(\"+incompatible\", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def rules():\n print \"Loading Rules\"\n return render_template(\"rules.html\")", "def parseFile(file,rules = None):\n if not rules: rules = RuleCollection()\n buf = \"\"\n for line in open(file,'r'):\n if not line[0]=='#':\n buf += line\n try:\n for (ptree,lo,hi) in ruleNT.scanString(buf):\n rules.add(Parser._convertRule(ptree))\n return rules\n except KeyError:\n print 'error near ',lo,'in',file\n return rules", "def get_rules(paths):\n raw_rules = []\n for path in paths:\n with open(path, \"r\", encoding=\"utf8\") as f:\n raw_rules += f.read().splitlines()\n \n return AdblockRules(raw_rules)", "def list_role_inference_rules(self):\n raise exception.NotImplemented() # pragma: no cover", "def get_rules(paths):\n raw_rules = []\n for path in paths:\n with open(path, \"r\", encoding=\"utf8\") as f:\n raw_rules += f.read().splitlines()\n \n return AdblockRules(raw_rules)", "def check_custom_rules(self):\n if self.custom_rules:\n with redirect_stdout(PrintLogger(name=\"pylint\", log_level=\"INFO\")):\n passed_custom, override = self.custom_rules(self.results.linter.stats, self.fname)\n if not passed_custom:\n self.logging.warning(\"{} FAILED CUSTOM CHECKS\".format(self.fname))\n self.custom_failed.append(self.fname)\n return passed_custom, override\n return False, False", "def readrules(self, fomalines):\n for lineno, l in enumerate(fomalines):\n if 'define' in l or 'def ' in l:\n rulecom = l.split(' #')\n r = re.findall(\"(defi?n?e?)\\s+(\\S+)\\s+([^;]+)\", rulecom[0])\n if len(r[0]) != 3:\n print \"Syntax error on line %i\" % lineno\n (_, rulename, rule) = r[0]\n if len(rulecom) > 1:\n commentline = rulecom[1].strip()\n 
else:\n commentline = ''\n self.rule_add(rulename, rule, commentline)\n if 'chain' in l:\n l = l.replace(';','')\n chain = re.findall('chain\\s+(.*)', l)\n rc = chain[0].replace(' ','').split(',')\n self.rc = rc", "def GetZeroQueryRules(input_file_name):\n rules = []\n with open(input_file_name, 'r') as input_file:\n for line in input_file:\n if line.startswith('#'):\n continue\n line = line.rstrip('\\r\\n')\n if not line:\n continue\n\n tokens = line.split('\\t')\n key = tokens[0]\n values = tokens[1].split(',')\n\n rules.append((key, values))\n rules.sort(lambda x, y: cmp(x[0], y[0])) # For binary search\n return rules", "def license_rules(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"license_rules\")", "def ip_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ip_rules\")", "def apply_rules(term: Term, rules):\n return functools.reduce(apply_rule, rules, term)", "def get_rules_for_type(type):\n\n rules = get_db().execute('SELECT * FROM ruleset WHERE type=?', (type,)).fetchall()\n\n return rules" ]
[ "0.76105815", "0.7469598", "0.73660934", "0.7312732", "0.721728", "0.71480995", "0.70150936", "0.68892413", "0.68892413", "0.67432487", "0.66999936", "0.6694647", "0.6694647", "0.669131", "0.6645618", "0.6565479", "0.65333295", "0.6530434", "0.65208477", "0.651483", "0.65071875", "0.64957", "0.64917344", "0.6485106", "0.64597285", "0.645632", "0.6406928", "0.63938135", "0.6371905", "0.63318115", "0.6326222", "0.6316648", "0.6273836", "0.6265625", "0.6256686", "0.62564653", "0.62508076", "0.62500155", "0.6195834", "0.6165039", "0.61278063", "0.6122602", "0.61147696", "0.61017627", "0.60959786", "0.6069379", "0.6068919", "0.60648304", "0.6062324", "0.6062047", "0.6041647", "0.6026492", "0.6014971", "0.6011419", "0.6010809", "0.6007865", "0.59890103", "0.5986201", "0.59726673", "0.5965942", "0.59536374", "0.594658", "0.593446", "0.5930544", "0.5926714", "0.5888764", "0.5886819", "0.58844143", "0.58739257", "0.58707935", "0.58537644", "0.5829875", "0.5828193", "0.58248174", "0.58021975", "0.57991034", "0.5760543", "0.5746575", "0.5735376", "0.5733241", "0.57327616", "0.57306993", "0.5726072", "0.57221556", "0.57197666", "0.5710229", "0.57086056", "0.5704269", "0.56987405", "0.568455", "0.56782544", "0.56663966", "0.56466436", "0.56395495", "0.5600726", "0.55900496", "0.5580071", "0.5573578", "0.5553044", "0.5550792" ]
0.60942304
45
Return rules for checking.
def rules(cls):
    rules_CityscapesTestConfig = {"batch_size": {"type": int},
                                  "list_path": {"type": str}
                                  }
    return rules_CityscapesTestConfig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rules(cls):\n raise NotImplementedError()", "def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules", "def get_rules(self):\n rules = []\n for item in self.rule:\n rules.append(item)\n return rules", "def get_rules(self):\n # TODO: Implement\n self.traverse2(self.tree, [])\n return self.rules", "def rules(self):\n return tuple(e for e in self.entries if e.is_rule)", "def get_rules(cls) -> list:\n return [factory() for factory in cls._rules_factories]", "def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def rules(cls):\n rules_Cityscapes = {\"common\": {\"type\": dict},\n \"train\": {\"type\": dict},\n \"val\": {\"type\": dict},\n \"test\": {\"type\": dict}\n }\n return rules_Cityscapes", "def getListOfRules(self):\n return self.model.getListOfRules()", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperationsPatch']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperationsPatch']]:\n return pulumi.get(self, \"rules\")", "def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())", "def rule_conditions(self) -> pulumi.Output[Sequence['outputs.RuleRuleCondition']]:\n return pulumi.get(self, \"rule_conditions\")", "def filter_rules(self) -> list:\n return self.transform(self._tree), self._rules", "def hrules(self):\n ...", "def rule_conditions(self) -> pulumi.Input[Sequence[pulumi.Input['RuleRuleConditionArgs']]]:\n return pulumi.get(self, \"rule_conditions\")", "def rule_conditions(self) -> Sequence['outputs.GetRulesRuleRuleConditionResult']:\n return pulumi.get(self, \"rule_conditions\")", "def rules(self) -> List['outputs.PreventionInspectTemplateInspectConfigRuleSetRule']:\n return pulumi.get(self, \"rules\")", "def get_rules(self):\n return [phi for psi in self._Psi for phi in psi]", "def rules(self) -> FrozenOrderedSet[Union[Callable, Rule]]:\n return self._rules", "def test_rules():", "def getListOfRules(self, *args):\n return _libsbml.Model_getListOfRules(self, *args)", "def check_rules(self):\n conditions = ('element', 'name', 'type')\n metric = set(['type', 'value'])\n \n elements = {}\n for rule in self.rules:\n if not 
isinstance(rule, dict):\n self.fail(msg='Rule format is not type dict: %s, type: %s'\n % (rule, type(rule)))\n if 'name' not in rule:\n self.fail(msg='Name is a required field for all rules')\n if 'match_condition' in rule:\n if not isinstance(rule['match_condition'], list):\n self.fail(msg='Match condition is expected to be a list')\n\n for match in rule['match_condition']:\n if match.get('type') == 'metric':\n if set(match.keys()) ^ metric:\n self.fail(msg='Metric definition can only have '\n 'values: %s, given: %s' % (list(metric), match))\n continue\n # Validate fields in condition\n for field in conditions:\n if field not in match:\n self.fail(msg='Match condition is missing a required '\n 'key: %r ,required: %s' % (match, list(conditions)))\n if field == 'element' and match.get(field) not in access_list:\n self.fail(msg='Match condition element is not valid: %s, '\n 'valid types: %s' % (match.get(field), list(access_list)))\n elif field == 'type' and match[field] not in match_conditions:\n self.fail(msg='Match condition type is not valid: %s, '\n 'valid types: %s' % (match[field], list(match_conditions)))\n \n element = match.get('element')\n # peer_address can only be type engine or external_bgp_peer\n if match['type'] == 'peer_address' and element not \\\n in ('engine', 'external_bgp_peer'):\n self.fail(msg='A peer address element can only be of type '\n 'engine or external_bgp_peer, provided definition: %s' % match)\n elif match['type'] == 'next_hop' and ('prefix_list' not in \\\n element and 'access_list' not in element):\n self.fail(msg='A next hop definition must be either an access '\n 'list or prefix list type, provided defintion: %s' % match)\n \n if 'engine' in element:\n element = 'single_fw,fw_cluster,virtual_fw'\n elements.setdefault(\n element, set([])).add(match.get('name'))\n \n return [elements] if elements else []", "def effective_rules(self) -> pulumi.Output[Sequence[Any]]:\n return pulumi.get(self, \"effective_rules\")", "def vrules(self):\n ...", "def rules(self):\n return self._alert_rules_client", "def extract_rules(self, labels=None):\n # Extract flat list of rules in array form\n if isinstance(self.base_estimator, RandomForestClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_]))\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules(__dt) for dt in self.base_estimator.estimators_.ravel()]))\n elif isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for dt in self._rule_dump]))\n \n # Convert each sub-rule into text, join together with '&' and then add to rules\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=labels, scaler=self.ext_scaler)) for r in rules])\n \n return self.rules", "def checks(self):\r\n return checks.Checks(self)", "def create_url_rules(self):\n return []", "def game_rules(self):\n self.valid_answers = dict()\n\n yield ScoreBoard(win=0, lose=0, tie=0)\n yield Results(winner='rock👊', loser='scissors✌', why='Rock👊 smashes scissors✌')\n yield Results(winner='paper👋', loser='rock👊', why='Paper👋 covers rock👊')\n yield Results(winner='scissors✌', loser='paper👋', why='Scissors✌ cut paper👋')\n yield ValidAnswer(answer='rock👊', key='r')\n yield ValidAnswer(answer='paper👋', key='p')\n yield ValidAnswer(answer='scissors✌', key='s')", "def items(self):\n return self._rules_by_lhs.items()", "def conditions(self):\n return 
self._separated_constructs(RuleCondition)", "def rules(self, transfer, robot_settings, dilution_settings):\n return []", "def getAllDecisionRules(self):\n\n #check this shit lol?\n thetas = self.getAllTheta()\n human_actions = self.getAllHumanActions()\n return [list(zip(thetas, item)) for item in itertools.product(human_actions, repeat=len(thetas))]", "def get_conditional_rules(self):\n conditional_rules = []\n\n for field in self.form.get_prep_value():\n\n rules = field['value'].get('rules', None)\n if rules:\n field_id = field['value'].get('field_id', None)\n if field_id:\n rules['field_name'] = field_id\n else:\n rules['field_name'] = clean_form_field_name(field['value']['label'])\n rules['required'] = field['value'].get('required', False)\n rules['field_type'] = field.get('type', None)\n conditions = rules.get('conditions', None)\n if len(conditions):\n for condition in conditions:\n del(condition['id'])\n del(condition['type'])\n condition['field_name'] = clean_form_field_name(condition['value']['field_name'])\n condition['rule'] = condition['value']['rule']\n condition['value'] = condition['value'].get('value', None)\n\n conditional_rules.append(rules)\n\n return conditional_rules", "def get_rules(self, M):\n return list(itertools.chain.from_iterable(\n list(itertools.chain.from_iterable(\n [[self.get_boxrules(x, M), self.get_unaryrules(x, M),\n self.at_least_one_rules(x, M), self.get_columnrules(x, M),\n self.get_rowrules(x, M)] for x in itertools.product(range(1, M+1),\n range(1, M+1))]\n ))\n ))", "def rules(self) -> pulumi.Input[Sequence[pulumi.Input['BucketLifecycleConfigurationV2RuleArgs']]]:\n return pulumi.get(self, \"rules\")", "def getRules(self):\n self._rules = {}\n _RuleSet = self._sets[self._currentSet - 1 ]\n for oneSet in _RuleSet :\n \n if len(oneSet) < 2 : \n pass \n \n for x in range(1, max(floor(len(oneSet) / 2),2) ):\n \n comb = combinations(oneSet, x)\n for item in comb:\n remaining = tuple(x for x in oneSet if x not in item)\n self._rules[(item,remaining)] = 0\n self._rules[(remaining,item)] = 0", "def extract_rule_names(self):\n if self.scanner == YARA:\n return sorted({result['rule'] for result in self.results})\n if self.scanner == CUSTOMS and 'matchedRules' in self.results:\n return self.results['matchedRules']\n # We do not have support for the remaining scanners (yet).\n return []", "def compliances(self) -> Sequence['outputs.GetRulesRuleComplianceResult']:\n return pulumi.get(self, \"compliances\")", "def get_rules(app):\n rules = [\n Rule('/', endpoint='home', handler='apps.busstopped.handlers.MainPage'),\n Rule('/ajax/busstopped/<line>/<direction>', endpoint='ajax-busstopped', handler='apps.busstopped.handlers.AjaxGetBusStopped'),\n Rule('/ajax/point', endpoint='ajax-point', handler='apps.busstopped.handlers.AjaxGetBusStopTimes'),\n Rule('/ajax/getbuspaths', endpoint='ajax-getbuspath', handler='apps.busstopped.handlers.AjaxGetBusPath'),\n Rule('/faq', endpoint='faq', handler='apps.busstopped.handlers.FAQPage'),\n Rule('/changelog', endpoint='change-log', handler='apps.busstopped.handlers.ChangeLogPage'),\n Rule('/info', endpoint='info', handler='apps.busstopped.handlers.InfoPage'),\n Rule('/addpoint', endpoint='add_point', handler='apps.busstopped.handlers.AddPointDocPage'),\n Rule('/news', endpoint='news', handler='apps.busstopped.handlers.NewsPage'),\n Rule('/parse', endpoint='parse', handler='apps.busstopped.handlers.ParseTimesPage'),\n ]\n\n return rules", "def get(self, *args):\n return _libsbml.ListOfRules_get(self, *args)", "def 
test_list_rules(self):\n pass", "def rules(cls):\n rules_CityscapesValConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesValConfig", "def get_all_rules(self):\n\n rules = set()\n for a_dict in self.get_dicts():\n rules = keywords.union(a_dict['rules'])\n return sorted(keywords)", "def _apply_commit_rules(rules, commit):\n all_violations = []\n for rule in rules:\n violations = rule.validate(commit)\n if violations:\n all_violations.extend(violations)\n return all_violations", "def rules(self) -> pulumi.Output[Sequence['outputs.BucketLifecycleConfigurationV2Rule']]:\n return pulumi.get(self, \"rules\")", "def get_rules():\n rules = []\n\n for app_module in get_config('tipfy', 'apps_installed'):\n try:\n # Load the urls module from the app and extend our rules.\n app_rules = import_string('%s.urls' % app_module)\n rules.extend(app_rules.get_rules())\n except ImportError:\n pass\n\n return rules", "def test_a1_check_rules(self):\n # Has rule\n rule = logic.check_rules(1, 1)\n self.assertEqual(rule, 1)\n rule = logic.check_rules(1, 2)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(1, 4)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(0, 3)\n self.assertEqual(rule, 4)\n rule = logic.check_rules(1, 8)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(1, 0)\n self.assertEqual(rule, 1)\n\n # No rule match\n rule = logic.check_rules(0, 1)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(0, 0)\n self.assertEqual(rule, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, 9)", "def get_rule_names(self):\n return self.rules.keys()", "def apply_rules(self):\n if len(self.rules) == 0:\n return\n for gene in self.population:\n for rule in self.rules:\n if rule.type == \"gene\":\n rule.check_and_apply(gene)", "def getChecks(self):\r\n raise AbstractError\r\n return []", "def policy_rules(self) -> Sequence[Any]:\n return pulumi.get(self, \"policy_rules\")", "def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketLifecycleConfigurationV2RuleArgs']]]]:\n return pulumi.get(self, \"rules\")", "def rules(cls):\n rules_CityscapesConfig = {\"batch_size\": {\"type\": int},\n \"root_path\": {\"type\": str},\n \"num_parallel_batches\": {\"type\": int},\n \"fixed_size\": {\"type\": bool}\n }\n return rules_CityscapesConfig", "def __init__(self, rules):\n self.rules = rules\n\n self._rhs_rules = defaultdict(list)\n self._rhs_unary_rules = defaultdict(list)\n\n self._nonterm = set(rule.lhs for rule in rules)\n self._term = set(token for rhs in chain(rule.rhs for rule in rules)\n for token in rhs if token not in self._nonterm)\n\n for rule in rules:\n _, rhs, _ = rule\n self._rhs_rules[rhs].append(rule)\n\n for rhs_rules in self._rhs_rules.values():\n rhs_rules.sort(key=lambda r: r.log_prob, reverse=True)\n\n self._is_cnf = all(len(rule.rhs) == 1\n or (len(rule.rhs) == 2\n and all(s in self._nonterm for s in rule.rhs))\n for rule in self.rules)", "def create_rules(self, grids):\n from ..models import Rule\n\n alts = [grid.get_alternative_total_rating_tuples() for grid in grids]\n combinations = itertools.product(*alts)\n rules = []\n for combi in combinations:\n alts, ratings = zip(*combi)\n rules.append(Rule(alts, sum(ratings)))\n\n rules.sort()\n\n return rules", "def make_all_rules(self):\n\n def compatible(pattern1, pattern2, direction):\n \"\"\"Returns `True` if `pattern2` is compatible with `pattern1` in the `direction`,\n 
otherwise return `False`.\"\"\"\n if direction == 0:\n return pattern1[:-1] == pattern2[1:]\n if direction == 2:\n return [line[:-1] for line in pattern1] == [line[1:] for line in pattern2]\n\n for index in range(len(self.patterns)):\n for ind in range(index + 1):\n for direction in (0, 2):\n if compatible(self.patterns[index], self.patterns[ind], direction):\n self.rules[index][direction].add(ind)\n self.rules[ind][direction + 1].add(index)", "def rules(cls):\n rules_CityscapesTrainConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesTrainConfig", "def rules(self):\r\n return Acls(self)", "def rule_conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleRuleConditionArgs']]]]:\n return pulumi.get(self, \"rule_conditions\")", "def _compile_rules(self):\n for state, table in self.RULES.items():\n patterns = []\n actions = []\n nextstates = []\n for i, row in enumerate(table):\n if len(row) == 2:\n pattern, action_ = row\n nextstate = None\n elif len(row) == 3:\n pattern, action_, nextstate = row\n else:\n fstr = \"invalid RULES: state {}, row {}\"\n raise CompileError(fstr.format(state, i))\n patterns.append(pattern)\n actions.append(action_)\n nextstates.append(nextstate)\n reobj = re.compile(\"|\".join(\"(\" + p + \")\" for p in patterns))\n self._rules[state] = (reobj, actions, nextstates)", "def get_real_rules():\n real = {}\n\n for name, rule in RULES.items():\n q = GraphMetric.select(GraphMetric.metric).where(\n GraphMetric.metric % name).group_by(GraphMetric.metric)\n\n for i in q:\n real[i.metric] = rule\n return real", "def eqv_path_rules(self) -> List[Tuple[CombinatorialClassType, Rule]]:\n eqv_path_rules = []\n curr = self.comb_class\n for rule in self.rules:\n eqv_path_rules.append((curr, rule))\n curr = rule.children[0]\n return eqv_path_rules", "def generate_rules(self):\n for rule in self._parser.conditionals:\n\n all_in_facts, matrix = self._generate_rules(rule)\n if all_in_facts is True:\n self.new_fact_from_facts(rule)\n else:\n facts = self._parser.conditionals[rule][1]\n #print(rule, facts, matrix)", "def find_matching_rules(self, requested_uri):\n rulesfound = []\n for rule in RewriteRule.objects.filter(register=self) :\n (matched,nestedrule) = rule.match_inheritance( requested_uri)\n if matched :\n rulechain = (rule,) + nestedrule.get_subrules() + (nestedrule,)\n rulesfound.append(rulechain)\n # get all sub rules not bound to a register \n return rulesfound", "def config_rules(self) -> Sequence['outputs.GetCompliancePacksPackConfigRuleResult']:\n return pulumi.get(self, \"config_rules\")", "def rule_list():\r\n #check RIGHT and DOWN borders\r\n all_blocks = get_blocks(-1)\r\n allowed = {}\r\n for i in range(len(all_blocks)): #index\r\n for j in range(len(all_blocks)):\r\n #check RIGHT border\r\n allowed[(i,j)] = [False,False]\r\n if all_blocks[i][1][2] == all_blocks[j][1][0]:\r\n allowed[(i,j)][0] = True\r\n #check DOWN border\r\n if all_blocks[i][2][1] == all_blocks[j][0][1]:\r\n allowed[(i,j)][1] = True\r\n return allowed", "def make_rules(UI):\n \n Conditionals = Conditional_Database(UI)\n location = UI.location\n \n \n Rules = []\n if location in ['Rio de Janeiro']:\n\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Br_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Br_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: 
Conditionals.Br_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Br_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Br_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Br_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Br_Rule7func(policy_input)))\n\n elif location in ['Indonesia']:\n #National\n Rules.append(SDlib.Rule('Implement Some Restrictions Nationwide', 1, \n func = lambda policy_input: Conditionals.In_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Nationwide', 2, \n func = lambda policy_input: Conditionals.In_Rule2func(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Nationwide', 3, \n func = lambda policy_input: Conditionals.In_Rule3func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Nationwide', 4, \n func = lambda policy_input: Conditionals.In_Rule4func(policy_input))) \n #Java\n Rules.append(SDlib.Rule('Implement Some Restrictions Java - Zonal', 5, \n func = lambda policy_input: Conditionals.In_Rule1func_j(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Java - Zonal', 6, \n func = lambda policy_input: Conditionals.In_Rule2func_j(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Java - Zonal', 7, \n func = lambda policy_input: Conditionals.In_Rule3func_j(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Java - Zonal', 8, \n func = lambda policy_input: Conditionals.In_Rule4func_j(policy_input))) \n\n #Sulawesi\n Rules.append(SDlib.Rule('Implement Some Restrictions Sulawesi - Zonal', 9, \n func = lambda policy_input: Conditionals.In_Rule1func_s(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Sulawesi - Zonal', 10, \n func = lambda policy_input: Conditionals.In_Rule2func_s(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Sulawesi - Zonal', 11, \n func = lambda policy_input: Conditionals.In_Rule3func_s(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Sulawesi - Zonal', 12, \n func = lambda policy_input: Conditionals.In_Rule4func_s(policy_input))) \n\n elif location in ['Chile']:\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Ch_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Ch_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Ch_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Ch_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Ch_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Ch_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Ch_Rule7func(policy_input)))\n \n elif location in ['Santiago']:\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: 
Conditionals.Sa_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Sa_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Sa_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Sa_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Sa_Rule5func(policy_input)))\n \n if location in ['Querétaro']:\n\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Br_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Br_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Br_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Br_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Br_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Br_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Br_Rule7func(policy_input)))\n\n return Rules", "def test_all_rules_are_tested():\n tested_rules = defaultdict(list)\n for test in TESTS:\n cls = test[0]\n rule = test[1]\n tested_rules[cls].append(rule)\n for cls in tested_rules.keys():\n if hasattr(cls, '_binary_rules'):\n rules = set(cls._binary_rules.keys())\n elif hasattr(cls, '_rules'):\n rules = set(cls._rules.keys())\n assert set(tested_rules[cls]) == rules", "def match_rules(rules, wm):\n res = []\n for r in rules:\n new_patterns = match_rule(r[0],r[1],r[2], wm)\n if new_patterns:\n print(\" Match succeeds\")\n print(\" Adding assertions to WM\")\n else:\n print(\" Match fails\")\n for n in new_patterns:\n if (n not in wm) and (n not in res):\n print(\" \",n)\n res.append(n)\n # print(\"new patterns so far = \", res)\n # print()\n # for testing\n # break\n return res", "def evaluate(self, request_info: RequestInfo):\n rule_results = [(rule, rule.matches(request_info)) for rule in self.rules]\n\n overriding_blocking_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.DENY and result == MatchResult.OVERRIDE\n ]\n overriding_allowing_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.ALLOW and result == MatchResult.OVERRIDE\n ]\n\n if len(overriding_allowing_rules) > 0:\n return Action.ALLOW, overriding_allowing_rules\n\n if len(overriding_blocking_rules) > 0:\n return Action.DENY, overriding_blocking_rules\n\n blocking_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.DENY and result == MatchResult.MATCH\n ]\n allowing_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.ALLOW and result == MatchResult.MATCH\n ]\n\n if len(allowing_rules) > 0:\n return Action.ALLOW, allowing_rules\n\n if len(blocking_rules) > 0:\n return Action.DENY, blocking_rules\n\n return Action.NOOP, None", "def get_searchable_rules(rules):\n searchable_rules = {rule.variable: {} for rule in rules}\n for rule in rules:\n searchable_rules[rule.variable][tuple(rule.derivation)] = rule\n return 
searchable_rules", "def get_rules_for_action(self, action_type: ActionType) -> List[\"Rule\"]:\n return [rule for rule in self.rules if rule.action_type == action_type]", "def get_rules(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetRulesV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetRulesV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "def getRule(self, *args):\n return _libsbml.Model_getRule(self, *args)", "def rules_for_request(request):\n surt = request.GET.get('surt')\n warc = request.GET.get('warc')\n capture_date = int(request.GET.get('capture-date')) # XXX try/except\n if surt is None or warc is None or capture_date is None:\n return error('surt, warc, and capture-date query string params'\n ' are all required', {})\n surt = Surt(surt)\n tree = tree_for_surt(surt)\n warc_parts = warc.split('-') # XXX validate warc name\n warc_date = int(warc_parts[4][0:-5]) # Parse an int out of the date minus ms\n applicable_rules = []\n for rule in tree:\n start = int(rule.capture_start.strftime('%Y%m%d%H%M'))\n end = int(rule.capture_end.strftime('%Y%m%d%H%M'))\n if ((warc_date > start and warc_date < end) and\n (capture_date > start and capture_date < end)):\n applicable_rules.append(rule)\n # Here is where we would make a surface-level decision on the action to\n # be taken (block, auth, allow, rewrite, etc). A point of optimization would\n # be to use django to only select the rules matching the date range, but for\n # now, we select the whole tree. Also, date comparisons would probably be\n # faster than coercing to strings, then to ints, but I was running short\n # on time.\n return success([rule.summary() for rule in applicable_rules])", "def getSpecRules(self, rhs):\n if rhs not in self.itemSet:\n print('Please input a term contain in the term-set !')\n return None\n \n rules = dict()\n for key, value in self.freqSet.items():\n for item in value:\n if rhs.issubset(item) and len(item) > 1:\n item_supp = self.getSupport(item)\n item = item.difference(rhs)\n conf = item_supp / self.getSupport(item)\n if conf >= self.minConf:\n rules[item] = conf\n return rules", "def rules():\r\n print(\"\\n\\\r\n \\tMmmmm... Si vous etes ici, j'imagine que votre décision est prise !\\n\\\r\n Je ne veux pas vous inciter à arreter mais vous etes conscient des risques ?\\n\\\r\n Vous participez quand meme au plus grand combat de tous les temps...\\n\\n\\\r\n \\tBon je vous explique les règles : Vous allez affronter un autre titan en duel\\n\\\r\n Vous avez chacun des tentatives d'infliger une attaque. Si elles réussissent,\\n\\\r\n Vous aurez le choix d'infliger entre 0 et 100 dégats. 
Enfin non puisque tout\\n\\\r\n sera tiré au sort :)\\n\\\r\n La partie s'arrete quand l'un des deux titans est mort\\n\\\r\n \\tBonne chance a vous!\\n\")", "def _get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def validate(self):\n self._validate_rule(self.rules, allow_self_reference=self.allow_self_reference)\n self._validate_operator_arguments(self.rules)\n return self.errors", "def custom_rules(self) -> pulumi.Output[Optional[Sequence['outputs.FirewallPolicyCustomRule']]]:\n return pulumi.get(self, \"custom_rules\")", "def compile_rule(self, cmd):\n cfg, rules = self.config, self.rules\n if cmd == None:\n return Rule()\n if isinstance(cmd, dict):\n r = []\n if 'as' in cmd:\n r += [self.compile_rule(cmd['as'])]\n if 'find' in cmd:\n r += [FindRule(cmd['find'], self)]\n if 'split' in cmd:\n c = cmd['split']\n if 'by' not in c:\n raise ConfigError('\"split.by\" is not defined!')\n if 'as' not in c:\n raise ConfigError('\"split.as\" is not defined!')\n return SplitRule(c['by'], self.compile_rule(c['as']), self)\n if 'count' in cmd:\n r += [CountRule(cmd['count'], self)]\n if 'group' in cmd:\n r += [GroupRule(cmd['group'], self)]\n if len(r) == 0:\n return Rule()\n return AndRule(r) if len(r) > 1 else r[0]\n if isinstance(cmd, list):\n return AndRule([self.compile_rule(c) for c in cmd])\n if cmd[0] == '?':\n return FindRule(cmd[1:], self)\n if cmd[0] == '$':\n #reference\n key = cmd[1:]\n if key in rules:\n return rules[key]\n if key not in cfg:\n raise ConfigError('Reference \"%s\" not defined!' 
% cmd)\n if key in self.ref:\n raise ConfigError('Recursively reference to key \"%s\"' % key)\n self.ref.add(key)\n rules[key] = self.compile_rule(cfg[key])\n return rules[key]\n return AsRule(cmd, self)", "def _get_all_checks(self):\n this_class = self.__class__\n\n check_list = [\n getattr(self, func)\n for func in dir(self.__class__)\n if callable(getattr(this_class, func))\n and func.startswith(self.check_prefix)\n ]\n\n return check_list", "def get_rules(self, obj, current_path=[]):\n # If node isn't a rule or dictionary\n if type(obj) != dict:\n return []\n\n # If node is a rule return its location and its details\n if self.is_rule(obj):\n return [([self.field] + current_path, obj)]\n\n rules = []\n for path, val in obj.items():\n rules = rules + self.get_rules(val, current_path + [path])\n return rules", "def get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n version = version.replace(\"+incompatible\", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def rules():\n print \"Loading Rules\"\n return render_template(\"rules.html\")", "def parseFile(file,rules = None):\n if not rules: rules = RuleCollection()\n buf = \"\"\n for line in open(file,'r'):\n if not line[0]=='#':\n buf += line\n try:\n for (ptree,lo,hi) in ruleNT.scanString(buf):\n rules.add(Parser._convertRule(ptree))\n return rules\n except KeyError:\n print 'error near ',lo,'in',file\n return rules", "def get_rules(paths):\n raw_rules = []\n for path in paths:\n with open(path, \"r\", encoding=\"utf8\") as f:\n raw_rules += f.read().splitlines()\n \n return AdblockRules(raw_rules)", "def list_role_inference_rules(self):\n raise exception.NotImplemented() # pragma: no cover", "def get_rules(paths):\n raw_rules = []\n for path in paths:\n with open(path, \"r\", encoding=\"utf8\") as f:\n raw_rules += f.read().splitlines()\n \n return AdblockRules(raw_rules)", "def check_custom_rules(self):\n if self.custom_rules:\n with redirect_stdout(PrintLogger(name=\"pylint\", log_level=\"INFO\")):\n passed_custom, override = self.custom_rules(self.results.linter.stats, self.fname)\n if not passed_custom:\n self.logging.warning(\"{} FAILED CUSTOM CHECKS\".format(self.fname))\n self.custom_failed.append(self.fname)\n return passed_custom, override\n return False, False", "def readrules(self, fomalines):\n for lineno, l in enumerate(fomalines):\n if 'define' in l or 'def ' in l:\n rulecom = l.split(' #')\n r = re.findall(\"(defi?n?e?)\\s+(\\S+)\\s+([^;]+)\", rulecom[0])\n if len(r[0]) != 3:\n print \"Syntax error on line %i\" % lineno\n (_, rulename, rule) = r[0]\n if len(rulecom) > 1:\n commentline = rulecom[1].strip()\n 
else:\n commentline = ''\n self.rule_add(rulename, rule, commentline)\n if 'chain' in l:\n l = l.replace(';','')\n chain = re.findall('chain\\s+(.*)', l)\n rc = chain[0].replace(' ','').split(',')\n self.rc = rc", "def GetZeroQueryRules(input_file_name):\n rules = []\n with open(input_file_name, 'r') as input_file:\n for line in input_file:\n if line.startswith('#'):\n continue\n line = line.rstrip('\\r\\n')\n if not line:\n continue\n\n tokens = line.split('\\t')\n key = tokens[0]\n values = tokens[1].split(',')\n\n rules.append((key, values))\n rules.sort(lambda x, y: cmp(x[0], y[0])) # For binary search\n return rules", "def license_rules(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"license_rules\")", "def ip_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ip_rules\")", "def apply_rules(term: Term, rules):\n return functools.reduce(apply_rule, rules, term)", "def get_rules_for_type(type):\n\n rules = get_db().execute('SELECT * FROM ruleset WHERE type=?', (type,)).fetchall()\n\n return rules" ]
[ "0.76105815", "0.7469598", "0.73660934", "0.7312732", "0.721728", "0.71480995", "0.70150936", "0.68892413", "0.68892413", "0.67432487", "0.66999936", "0.6694647", "0.6694647", "0.669131", "0.6645618", "0.6565479", "0.65333295", "0.6530434", "0.65208477", "0.651483", "0.65071875", "0.64957", "0.64917344", "0.6485106", "0.64597285", "0.645632", "0.6406928", "0.63938135", "0.6371905", "0.63318115", "0.6326222", "0.6316648", "0.6273836", "0.6265625", "0.6256686", "0.62564653", "0.62508076", "0.62500155", "0.6195834", "0.6165039", "0.61278063", "0.6122602", "0.61147696", "0.61017627", "0.60959786", "0.60942304", "0.6069379", "0.6068919", "0.60648304", "0.6062324", "0.6062047", "0.6041647", "0.6026492", "0.6014971", "0.6010809", "0.6007865", "0.59890103", "0.5986201", "0.59726673", "0.5965942", "0.59536374", "0.594658", "0.593446", "0.5930544", "0.5926714", "0.5888764", "0.5886819", "0.58844143", "0.58739257", "0.58707935", "0.58537644", "0.5829875", "0.5828193", "0.58248174", "0.58021975", "0.57991034", "0.5760543", "0.5746575", "0.5735376", "0.5733241", "0.57327616", "0.57306993", "0.5726072", "0.57221556", "0.57197666", "0.5710229", "0.57086056", "0.5704269", "0.56987405", "0.568455", "0.56782544", "0.56663966", "0.56466436", "0.56395495", "0.5600726", "0.55900496", "0.5580071", "0.5573578", "0.5553044", "0.5550792" ]
0.6011419
54
Return rules for checking.
def rules(cls):
    rules_Cityscapes = {"common": {"type": dict},
                        "train": {"type": dict},
                        "val": {"type": dict},
                        "test": {"type": dict}
                        }
    return rules_Cityscapes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rules(cls):\n raise NotImplementedError()", "def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules", "def get_rules(self):\n rules = []\n for item in self.rule:\n rules.append(item)\n return rules", "def get_rules(self):\n # TODO: Implement\n self.traverse2(self.tree, [])\n return self.rules", "def rules(self):\n return tuple(e for e in self.entries if e.is_rule)", "def get_rules(cls) -> list:\n return [factory() for factory in cls._rules_factories]", "def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def getListOfRules(self):\n return self.model.getListOfRules()", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperationsPatch']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperationsPatch']]:\n return pulumi.get(self, \"rules\")", "def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())", "def rule_conditions(self) -> pulumi.Output[Sequence['outputs.RuleRuleCondition']]:\n return pulumi.get(self, \"rule_conditions\")", "def filter_rules(self) -> list:\n return self.transform(self._tree), self._rules", "def hrules(self):\n ...", "def rule_conditions(self) -> pulumi.Input[Sequence[pulumi.Input['RuleRuleConditionArgs']]]:\n return pulumi.get(self, \"rule_conditions\")", "def rule_conditions(self) -> Sequence['outputs.GetRulesRuleRuleConditionResult']:\n return pulumi.get(self, \"rule_conditions\")", "def rules(self) -> List['outputs.PreventionInspectTemplateInspectConfigRuleSetRule']:\n return pulumi.get(self, \"rules\")", "def get_rules(self):\n return [phi for psi in self._Psi for phi in psi]", "def rules(self) -> FrozenOrderedSet[Union[Callable, Rule]]:\n return self._rules", "def test_rules():", "def getListOfRules(self, *args):\n return _libsbml.Model_getListOfRules(self, *args)", "def check_rules(self):\n conditions = ('element', 'name', 'type')\n metric = set(['type', 'value'])\n \n elements = {}\n for rule in self.rules:\n if not isinstance(rule, dict):\n self.fail(msg='Rule format is not type dict: %s, type: %s'\n % (rule, type(rule)))\n if 'name' not in rule:\n self.fail(msg='Name is a required field for all rules')\n 
if 'match_condition' in rule:\n if not isinstance(rule['match_condition'], list):\n self.fail(msg='Match condition is expected to be a list')\n\n for match in rule['match_condition']:\n if match.get('type') == 'metric':\n if set(match.keys()) ^ metric:\n self.fail(msg='Metric definition can only have '\n 'values: %s, given: %s' % (list(metric), match))\n continue\n # Validate fields in condition\n for field in conditions:\n if field not in match:\n self.fail(msg='Match condition is missing a required '\n 'key: %r ,required: %s' % (match, list(conditions)))\n if field == 'element' and match.get(field) not in access_list:\n self.fail(msg='Match condition element is not valid: %s, '\n 'valid types: %s' % (match.get(field), list(access_list)))\n elif field == 'type' and match[field] not in match_conditions:\n self.fail(msg='Match condition type is not valid: %s, '\n 'valid types: %s' % (match[field], list(match_conditions)))\n \n element = match.get('element')\n # peer_address can only be type engine or external_bgp_peer\n if match['type'] == 'peer_address' and element not \\\n in ('engine', 'external_bgp_peer'):\n self.fail(msg='A peer address element can only be of type '\n 'engine or external_bgp_peer, provided definition: %s' % match)\n elif match['type'] == 'next_hop' and ('prefix_list' not in \\\n element and 'access_list' not in element):\n self.fail(msg='A next hop definition must be either an access '\n 'list or prefix list type, provided defintion: %s' % match)\n \n if 'engine' in element:\n element = 'single_fw,fw_cluster,virtual_fw'\n elements.setdefault(\n element, set([])).add(match.get('name'))\n \n return [elements] if elements else []", "def effective_rules(self) -> pulumi.Output[Sequence[Any]]:\n return pulumi.get(self, \"effective_rules\")", "def vrules(self):\n ...", "def rules(self):\n return self._alert_rules_client", "def extract_rules(self, labels=None):\n # Extract flat list of rules in array form\n if isinstance(self.base_estimator, RandomForestClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_]))\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules(__dt) for dt in self.base_estimator.estimators_.ravel()]))\n elif isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for dt in self._rule_dump]))\n \n # Convert each sub-rule into text, join together with '&' and then add to rules\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=labels, scaler=self.ext_scaler)) for r in rules])\n \n return self.rules", "def checks(self):\r\n return checks.Checks(self)", "def create_url_rules(self):\n return []", "def game_rules(self):\n self.valid_answers = dict()\n\n yield ScoreBoard(win=0, lose=0, tie=0)\n yield Results(winner='rock👊', loser='scissors✌', why='Rock👊 smashes scissors✌')\n yield Results(winner='paper👋', loser='rock👊', why='Paper👋 covers rock👊')\n yield Results(winner='scissors✌', loser='paper👋', why='Scissors✌ cut paper👋')\n yield ValidAnswer(answer='rock👊', key='r')\n yield ValidAnswer(answer='paper👋', key='p')\n yield ValidAnswer(answer='scissors✌', key='s')", "def items(self):\n return self._rules_by_lhs.items()", "def conditions(self):\n return self._separated_constructs(RuleCondition)", "def rules(self, transfer, robot_settings, dilution_settings):\n return []", "def getAllDecisionRules(self):\n\n #check this shit lol?\n thetas = self.getAllTheta()\n human_actions = 
self.getAllHumanActions()\n return [list(zip(thetas, item)) for item in itertools.product(human_actions, repeat=len(thetas))]", "def get_conditional_rules(self):\n conditional_rules = []\n\n for field in self.form.get_prep_value():\n\n rules = field['value'].get('rules', None)\n if rules:\n field_id = field['value'].get('field_id', None)\n if field_id:\n rules['field_name'] = field_id\n else:\n rules['field_name'] = clean_form_field_name(field['value']['label'])\n rules['required'] = field['value'].get('required', False)\n rules['field_type'] = field.get('type', None)\n conditions = rules.get('conditions', None)\n if len(conditions):\n for condition in conditions:\n del(condition['id'])\n del(condition['type'])\n condition['field_name'] = clean_form_field_name(condition['value']['field_name'])\n condition['rule'] = condition['value']['rule']\n condition['value'] = condition['value'].get('value', None)\n\n conditional_rules.append(rules)\n\n return conditional_rules", "def get_rules(self, M):\n return list(itertools.chain.from_iterable(\n list(itertools.chain.from_iterable(\n [[self.get_boxrules(x, M), self.get_unaryrules(x, M),\n self.at_least_one_rules(x, M), self.get_columnrules(x, M),\n self.get_rowrules(x, M)] for x in itertools.product(range(1, M+1),\n range(1, M+1))]\n ))\n ))", "def rules(self) -> pulumi.Input[Sequence[pulumi.Input['BucketLifecycleConfigurationV2RuleArgs']]]:\n return pulumi.get(self, \"rules\")", "def getRules(self):\n self._rules = {}\n _RuleSet = self._sets[self._currentSet - 1 ]\n for oneSet in _RuleSet :\n \n if len(oneSet) < 2 : \n pass \n \n for x in range(1, max(floor(len(oneSet) / 2),2) ):\n \n comb = combinations(oneSet, x)\n for item in comb:\n remaining = tuple(x for x in oneSet if x not in item)\n self._rules[(item,remaining)] = 0\n self._rules[(remaining,item)] = 0", "def extract_rule_names(self):\n if self.scanner == YARA:\n return sorted({result['rule'] for result in self.results})\n if self.scanner == CUSTOMS and 'matchedRules' in self.results:\n return self.results['matchedRules']\n # We do not have support for the remaining scanners (yet).\n return []", "def compliances(self) -> Sequence['outputs.GetRulesRuleComplianceResult']:\n return pulumi.get(self, \"compliances\")", "def get_rules(app):\n rules = [\n Rule('/', endpoint='home', handler='apps.busstopped.handlers.MainPage'),\n Rule('/ajax/busstopped/<line>/<direction>', endpoint='ajax-busstopped', handler='apps.busstopped.handlers.AjaxGetBusStopped'),\n Rule('/ajax/point', endpoint='ajax-point', handler='apps.busstopped.handlers.AjaxGetBusStopTimes'),\n Rule('/ajax/getbuspaths', endpoint='ajax-getbuspath', handler='apps.busstopped.handlers.AjaxGetBusPath'),\n Rule('/faq', endpoint='faq', handler='apps.busstopped.handlers.FAQPage'),\n Rule('/changelog', endpoint='change-log', handler='apps.busstopped.handlers.ChangeLogPage'),\n Rule('/info', endpoint='info', handler='apps.busstopped.handlers.InfoPage'),\n Rule('/addpoint', endpoint='add_point', handler='apps.busstopped.handlers.AddPointDocPage'),\n Rule('/news', endpoint='news', handler='apps.busstopped.handlers.NewsPage'),\n Rule('/parse', endpoint='parse', handler='apps.busstopped.handlers.ParseTimesPage'),\n ]\n\n return rules", "def get(self, *args):\n return _libsbml.ListOfRules_get(self, *args)", "def test_list_rules(self):\n pass", "def rules(cls):\n rules_CityscapesValConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesValConfig", "def get_all_rules(self):\n\n rules = set()\n for 
a_dict in self.get_dicts():\n rules = keywords.union(a_dict['rules'])\n return sorted(keywords)", "def _apply_commit_rules(rules, commit):\n all_violations = []\n for rule in rules:\n violations = rule.validate(commit)\n if violations:\n all_violations.extend(violations)\n return all_violations", "def rules(self) -> pulumi.Output[Sequence['outputs.BucketLifecycleConfigurationV2Rule']]:\n return pulumi.get(self, \"rules\")", "def get_rules():\n rules = []\n\n for app_module in get_config('tipfy', 'apps_installed'):\n try:\n # Load the urls module from the app and extend our rules.\n app_rules = import_string('%s.urls' % app_module)\n rules.extend(app_rules.get_rules())\n except ImportError:\n pass\n\n return rules", "def test_a1_check_rules(self):\n # Has rule\n rule = logic.check_rules(1, 1)\n self.assertEqual(rule, 1)\n rule = logic.check_rules(1, 2)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(1, 4)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(0, 3)\n self.assertEqual(rule, 4)\n rule = logic.check_rules(1, 8)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(1, 0)\n self.assertEqual(rule, 1)\n\n # No rule match\n rule = logic.check_rules(0, 1)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(0, 0)\n self.assertEqual(rule, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, 9)", "def get_rule_names(self):\n return self.rules.keys()", "def apply_rules(self):\n if len(self.rules) == 0:\n return\n for gene in self.population:\n for rule in self.rules:\n if rule.type == \"gene\":\n rule.check_and_apply(gene)", "def getChecks(self):\r\n raise AbstractError\r\n return []", "def rules(cls):\n rules_CityscapesTestConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesTestConfig", "def policy_rules(self) -> Sequence[Any]:\n return pulumi.get(self, \"policy_rules\")", "def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketLifecycleConfigurationV2RuleArgs']]]]:\n return pulumi.get(self, \"rules\")", "def rules(cls):\n rules_CityscapesConfig = {\"batch_size\": {\"type\": int},\n \"root_path\": {\"type\": str},\n \"num_parallel_batches\": {\"type\": int},\n \"fixed_size\": {\"type\": bool}\n }\n return rules_CityscapesConfig", "def __init__(self, rules):\n self.rules = rules\n\n self._rhs_rules = defaultdict(list)\n self._rhs_unary_rules = defaultdict(list)\n\n self._nonterm = set(rule.lhs for rule in rules)\n self._term = set(token for rhs in chain(rule.rhs for rule in rules)\n for token in rhs if token not in self._nonterm)\n\n for rule in rules:\n _, rhs, _ = rule\n self._rhs_rules[rhs].append(rule)\n\n for rhs_rules in self._rhs_rules.values():\n rhs_rules.sort(key=lambda r: r.log_prob, reverse=True)\n\n self._is_cnf = all(len(rule.rhs) == 1\n or (len(rule.rhs) == 2\n and all(s in self._nonterm for s in rule.rhs))\n for rule in self.rules)", "def create_rules(self, grids):\n from ..models import Rule\n\n alts = [grid.get_alternative_total_rating_tuples() for grid in grids]\n combinations = itertools.product(*alts)\n rules = []\n for combi in combinations:\n alts, ratings = zip(*combi)\n rules.append(Rule(alts, sum(ratings)))\n\n rules.sort()\n\n return rules", "def make_all_rules(self):\n\n def compatible(pattern1, pattern2, direction):\n \"\"\"Returns `True` if `pattern2` is compatible with `pattern1` in the `direction`,\n otherwise return `False`.\"\"\"\n if direction == 0:\n return pattern1[:-1] == 
pattern2[1:]\n if direction == 2:\n return [line[:-1] for line in pattern1] == [line[1:] for line in pattern2]\n\n for index in range(len(self.patterns)):\n for ind in range(index + 1):\n for direction in (0, 2):\n if compatible(self.patterns[index], self.patterns[ind], direction):\n self.rules[index][direction].add(ind)\n self.rules[ind][direction + 1].add(index)", "def rules(cls):\n rules_CityscapesTrainConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesTrainConfig", "def rules(self):\r\n return Acls(self)", "def rule_conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleRuleConditionArgs']]]]:\n return pulumi.get(self, \"rule_conditions\")", "def _compile_rules(self):\n for state, table in self.RULES.items():\n patterns = []\n actions = []\n nextstates = []\n for i, row in enumerate(table):\n if len(row) == 2:\n pattern, action_ = row\n nextstate = None\n elif len(row) == 3:\n pattern, action_, nextstate = row\n else:\n fstr = \"invalid RULES: state {}, row {}\"\n raise CompileError(fstr.format(state, i))\n patterns.append(pattern)\n actions.append(action_)\n nextstates.append(nextstate)\n reobj = re.compile(\"|\".join(\"(\" + p + \")\" for p in patterns))\n self._rules[state] = (reobj, actions, nextstates)", "def get_real_rules():\n real = {}\n\n for name, rule in RULES.items():\n q = GraphMetric.select(GraphMetric.metric).where(\n GraphMetric.metric % name).group_by(GraphMetric.metric)\n\n for i in q:\n real[i.metric] = rule\n return real", "def eqv_path_rules(self) -> List[Tuple[CombinatorialClassType, Rule]]:\n eqv_path_rules = []\n curr = self.comb_class\n for rule in self.rules:\n eqv_path_rules.append((curr, rule))\n curr = rule.children[0]\n return eqv_path_rules", "def generate_rules(self):\n for rule in self._parser.conditionals:\n\n all_in_facts, matrix = self._generate_rules(rule)\n if all_in_facts is True:\n self.new_fact_from_facts(rule)\n else:\n facts = self._parser.conditionals[rule][1]\n #print(rule, facts, matrix)", "def find_matching_rules(self, requested_uri):\n rulesfound = []\n for rule in RewriteRule.objects.filter(register=self) :\n (matched,nestedrule) = rule.match_inheritance( requested_uri)\n if matched :\n rulechain = (rule,) + nestedrule.get_subrules() + (nestedrule,)\n rulesfound.append(rulechain)\n # get all sub rules not bound to a register \n return rulesfound", "def config_rules(self) -> Sequence['outputs.GetCompliancePacksPackConfigRuleResult']:\n return pulumi.get(self, \"config_rules\")", "def rule_list():\r\n #check RIGHT and DOWN borders\r\n all_blocks = get_blocks(-1)\r\n allowed = {}\r\n for i in range(len(all_blocks)): #index\r\n for j in range(len(all_blocks)):\r\n #check RIGHT border\r\n allowed[(i,j)] = [False,False]\r\n if all_blocks[i][1][2] == all_blocks[j][1][0]:\r\n allowed[(i,j)][0] = True\r\n #check DOWN border\r\n if all_blocks[i][2][1] == all_blocks[j][0][1]:\r\n allowed[(i,j)][1] = True\r\n return allowed", "def make_rules(UI):\n \n Conditionals = Conditional_Database(UI)\n location = UI.location\n \n \n Rules = []\n if location in ['Rio de Janeiro']:\n\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Br_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Br_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Br_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some 
Businesses', 4, \n func = lambda policy_input: Conditionals.Br_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Br_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Br_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Br_Rule7func(policy_input)))\n\n elif location in ['Indonesia']:\n #National\n Rules.append(SDlib.Rule('Implement Some Restrictions Nationwide', 1, \n func = lambda policy_input: Conditionals.In_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Nationwide', 2, \n func = lambda policy_input: Conditionals.In_Rule2func(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Nationwide', 3, \n func = lambda policy_input: Conditionals.In_Rule3func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Nationwide', 4, \n func = lambda policy_input: Conditionals.In_Rule4func(policy_input))) \n #Java\n Rules.append(SDlib.Rule('Implement Some Restrictions Java - Zonal', 5, \n func = lambda policy_input: Conditionals.In_Rule1func_j(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Java - Zonal', 6, \n func = lambda policy_input: Conditionals.In_Rule2func_j(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Java - Zonal', 7, \n func = lambda policy_input: Conditionals.In_Rule3func_j(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Java - Zonal', 8, \n func = lambda policy_input: Conditionals.In_Rule4func_j(policy_input))) \n\n #Sulawesi\n Rules.append(SDlib.Rule('Implement Some Restrictions Sulawesi - Zonal', 9, \n func = lambda policy_input: Conditionals.In_Rule1func_s(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Sulawesi - Zonal', 10, \n func = lambda policy_input: Conditionals.In_Rule2func_s(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Sulawesi - Zonal', 11, \n func = lambda policy_input: Conditionals.In_Rule3func_s(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Sulawesi - Zonal', 12, \n func = lambda policy_input: Conditionals.In_Rule4func_s(policy_input))) \n\n elif location in ['Chile']:\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Ch_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Ch_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Ch_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Ch_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Ch_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Ch_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Ch_Rule7func(policy_input)))\n \n elif location in ['Santiago']:\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Sa_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda 
policy_input: Conditionals.Sa_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Sa_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Sa_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Sa_Rule5func(policy_input)))\n \n if location in ['Querétaro']:\n\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Br_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Br_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Br_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Br_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Br_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Br_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Br_Rule7func(policy_input)))\n\n return Rules", "def test_all_rules_are_tested():\n tested_rules = defaultdict(list)\n for test in TESTS:\n cls = test[0]\n rule = test[1]\n tested_rules[cls].append(rule)\n for cls in tested_rules.keys():\n if hasattr(cls, '_binary_rules'):\n rules = set(cls._binary_rules.keys())\n elif hasattr(cls, '_rules'):\n rules = set(cls._rules.keys())\n assert set(tested_rules[cls]) == rules", "def match_rules(rules, wm):\n res = []\n for r in rules:\n new_patterns = match_rule(r[0],r[1],r[2], wm)\n if new_patterns:\n print(\" Match succeeds\")\n print(\" Adding assertions to WM\")\n else:\n print(\" Match fails\")\n for n in new_patterns:\n if (n not in wm) and (n not in res):\n print(\" \",n)\n res.append(n)\n # print(\"new patterns so far = \", res)\n # print()\n # for testing\n # break\n return res", "def evaluate(self, request_info: RequestInfo):\n rule_results = [(rule, rule.matches(request_info)) for rule in self.rules]\n\n overriding_blocking_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.DENY and result == MatchResult.OVERRIDE\n ]\n overriding_allowing_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.ALLOW and result == MatchResult.OVERRIDE\n ]\n\n if len(overriding_allowing_rules) > 0:\n return Action.ALLOW, overriding_allowing_rules\n\n if len(overriding_blocking_rules) > 0:\n return Action.DENY, overriding_blocking_rules\n\n blocking_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.DENY and result == MatchResult.MATCH\n ]\n allowing_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.ALLOW and result == MatchResult.MATCH\n ]\n\n if len(allowing_rules) > 0:\n return Action.ALLOW, allowing_rules\n\n if len(blocking_rules) > 0:\n return Action.DENY, blocking_rules\n\n return Action.NOOP, None", "def get_searchable_rules(rules):\n searchable_rules = {rule.variable: {} for rule in rules}\n for rule in rules:\n searchable_rules[rule.variable][tuple(rule.derivation)] = rule\n return searchable_rules", "def get_rules_for_action(self, action_type: ActionType) -> List[\"Rule\"]:\n return [rule 
for rule in self.rules if rule.action_type == action_type]", "def get_rules(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetRulesV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetRulesV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "def getRule(self, *args):\n return _libsbml.Model_getRule(self, *args)", "def rules_for_request(request):\n surt = request.GET.get('surt')\n warc = request.GET.get('warc')\n capture_date = int(request.GET.get('capture-date')) # XXX try/except\n if surt is None or warc is None or capture_date is None:\n return error('surt, warc, and capture-date query string params'\n ' are all required', {})\n surt = Surt(surt)\n tree = tree_for_surt(surt)\n warc_parts = warc.split('-') # XXX validate warc name\n warc_date = int(warc_parts[4][0:-5]) # Parse an int out of the date minus ms\n applicable_rules = []\n for rule in tree:\n start = int(rule.capture_start.strftime('%Y%m%d%H%M'))\n end = int(rule.capture_end.strftime('%Y%m%d%H%M'))\n if ((warc_date > start and warc_date < end) and\n (capture_date > start and capture_date < end)):\n applicable_rules.append(rule)\n # Here is where we would make a surface-level decision on the action to\n # be taken (block, auth, allow, rewrite, etc). A point of optimization would\n # be to use django to only select the rules matching the date range, but for\n # now, we select the whole tree. Also, date comparisons would probably be\n # faster than coercing to strings, then to ints, but I was running short\n # on time.\n return success([rule.summary() for rule in applicable_rules])", "def getSpecRules(self, rhs):\n if rhs not in self.itemSet:\n print('Please input a term contain in the term-set !')\n return None\n \n rules = dict()\n for key, value in self.freqSet.items():\n for item in value:\n if rhs.issubset(item) and len(item) > 1:\n item_supp = self.getSupport(item)\n item = item.difference(rhs)\n conf = item_supp / self.getSupport(item)\n if conf >= self.minConf:\n rules[item] = conf\n return rules", "def rules():\r\n print(\"\\n\\\r\n \\tMmmmm... Si vous etes ici, j'imagine que votre décision est prise !\\n\\\r\n Je ne veux pas vous inciter à arreter mais vous etes conscient des risques ?\\n\\\r\n Vous participez quand meme au plus grand combat de tous les temps...\\n\\n\\\r\n \\tBon je vous explique les règles : Vous allez affronter un autre titan en duel\\n\\\r\n Vous avez chacun des tentatives d'infliger une attaque. Si elles réussissent,\\n\\\r\n Vous aurez le choix d'infliger entre 0 et 100 dégats. 
Enfin non puisque tout\\n\\\r\n sera tiré au sort :)\\n\\\r\n La partie s'arrete quand l'un des deux titans est mort\\n\\\r\n \\tBonne chance a vous!\\n\")", "def _get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def validate(self):\n self._validate_rule(self.rules, allow_self_reference=self.allow_self_reference)\n self._validate_operator_arguments(self.rules)\n return self.errors", "def custom_rules(self) -> pulumi.Output[Optional[Sequence['outputs.FirewallPolicyCustomRule']]]:\n return pulumi.get(self, \"custom_rules\")", "def compile_rule(self, cmd):\n cfg, rules = self.config, self.rules\n if cmd == None:\n return Rule()\n if isinstance(cmd, dict):\n r = []\n if 'as' in cmd:\n r += [self.compile_rule(cmd['as'])]\n if 'find' in cmd:\n r += [FindRule(cmd['find'], self)]\n if 'split' in cmd:\n c = cmd['split']\n if 'by' not in c:\n raise ConfigError('\"split.by\" is not defined!')\n if 'as' not in c:\n raise ConfigError('\"split.as\" is not defined!')\n return SplitRule(c['by'], self.compile_rule(c['as']), self)\n if 'count' in cmd:\n r += [CountRule(cmd['count'], self)]\n if 'group' in cmd:\n r += [GroupRule(cmd['group'], self)]\n if len(r) == 0:\n return Rule()\n return AndRule(r) if len(r) > 1 else r[0]\n if isinstance(cmd, list):\n return AndRule([self.compile_rule(c) for c in cmd])\n if cmd[0] == '?':\n return FindRule(cmd[1:], self)\n if cmd[0] == '$':\n #reference\n key = cmd[1:]\n if key in rules:\n return rules[key]\n if key not in cfg:\n raise ConfigError('Reference \"%s\" not defined!' 
% cmd)\n if key in self.ref:\n raise ConfigError('Recursively reference to key \"%s\"' % key)\n self.ref.add(key)\n rules[key] = self.compile_rule(cfg[key])\n return rules[key]\n return AsRule(cmd, self)", "def _get_all_checks(self):\n this_class = self.__class__\n\n check_list = [\n getattr(self, func)\n for func in dir(self.__class__)\n if callable(getattr(this_class, func))\n and func.startswith(self.check_prefix)\n ]\n\n return check_list", "def get_rules(self, obj, current_path=[]):\n # If node isn't a rule or dictionary\n if type(obj) != dict:\n return []\n\n # If node is a rule return its location and its details\n if self.is_rule(obj):\n return [([self.field] + current_path, obj)]\n\n rules = []\n for path, val in obj.items():\n rules = rules + self.get_rules(val, current_path + [path])\n return rules", "def get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n version = version.replace(\"+incompatible\", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def rules():\n print \"Loading Rules\"\n return render_template(\"rules.html\")", "def parseFile(file,rules = None):\n if not rules: rules = RuleCollection()\n buf = \"\"\n for line in open(file,'r'):\n if not line[0]=='#':\n buf += line\n try:\n for (ptree,lo,hi) in ruleNT.scanString(buf):\n rules.add(Parser._convertRule(ptree))\n return rules\n except KeyError:\n print 'error near ',lo,'in',file\n return rules", "def get_rules(paths):\n raw_rules = []\n for path in paths:\n with open(path, \"r\", encoding=\"utf8\") as f:\n raw_rules += f.read().splitlines()\n \n return AdblockRules(raw_rules)", "def list_role_inference_rules(self):\n raise exception.NotImplemented() # pragma: no cover", "def get_rules(paths):\n raw_rules = []\n for path in paths:\n with open(path, \"r\", encoding=\"utf8\") as f:\n raw_rules += f.read().splitlines()\n \n return AdblockRules(raw_rules)", "def check_custom_rules(self):\n if self.custom_rules:\n with redirect_stdout(PrintLogger(name=\"pylint\", log_level=\"INFO\")):\n passed_custom, override = self.custom_rules(self.results.linter.stats, self.fname)\n if not passed_custom:\n self.logging.warning(\"{} FAILED CUSTOM CHECKS\".format(self.fname))\n self.custom_failed.append(self.fname)\n return passed_custom, override\n return False, False", "def readrules(self, fomalines):\n for lineno, l in enumerate(fomalines):\n if 'define' in l or 'def ' in l:\n rulecom = l.split(' #')\n r = re.findall(\"(defi?n?e?)\\s+(\\S+)\\s+([^;]+)\", rulecom[0])\n if len(r[0]) != 3:\n print \"Syntax error on line %i\" % lineno\n (_, rulename, rule) = r[0]\n if len(rulecom) > 1:\n commentline = rulecom[1].strip()\n 
else:\n commentline = ''\n self.rule_add(rulename, rule, commentline)\n if 'chain' in l:\n l = l.replace(';','')\n chain = re.findall('chain\\s+(.*)', l)\n rc = chain[0].replace(' ','').split(',')\n self.rc = rc", "def GetZeroQueryRules(input_file_name):\n rules = []\n with open(input_file_name, 'r') as input_file:\n for line in input_file:\n if line.startswith('#'):\n continue\n line = line.rstrip('\\r\\n')\n if not line:\n continue\n\n tokens = line.split('\\t')\n key = tokens[0]\n values = tokens[1].split(',')\n\n rules.append((key, values))\n rules.sort(lambda x, y: cmp(x[0], y[0])) # For binary search\n return rules", "def license_rules(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"license_rules\")", "def ip_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ip_rules\")", "def apply_rules(term: Term, rules):\n return functools.reduce(apply_rule, rules, term)", "def get_rules_for_type(type):\n\n rules = get_db().execute('SELECT * FROM ruleset WHERE type=?', (type,)).fetchall()\n\n return rules" ]
[ "0.76105815", "0.7469598", "0.73660934", "0.7312732", "0.721728", "0.71480995", "0.70150936", "0.68892413", "0.68892413", "0.66999936", "0.6694647", "0.6694647", "0.669131", "0.6645618", "0.6565479", "0.65333295", "0.6530434", "0.65208477", "0.651483", "0.65071875", "0.64957", "0.64917344", "0.6485106", "0.64597285", "0.645632", "0.6406928", "0.63938135", "0.6371905", "0.63318115", "0.6326222", "0.6316648", "0.6273836", "0.6265625", "0.6256686", "0.62564653", "0.62508076", "0.62500155", "0.6195834", "0.6165039", "0.61278063", "0.6122602", "0.61147696", "0.61017627", "0.60959786", "0.60942304", "0.6069379", "0.6068919", "0.60648304", "0.6062324", "0.6062047", "0.6041647", "0.6026492", "0.6014971", "0.6011419", "0.6010809", "0.6007865", "0.59890103", "0.5986201", "0.59726673", "0.5965942", "0.59536374", "0.594658", "0.593446", "0.5930544", "0.5926714", "0.5888764", "0.5886819", "0.58844143", "0.58739257", "0.58707935", "0.58537644", "0.5829875", "0.5828193", "0.58248174", "0.58021975", "0.57991034", "0.5760543", "0.5746575", "0.5735376", "0.5733241", "0.57327616", "0.57306993", "0.5726072", "0.57221556", "0.57197666", "0.5710229", "0.57086056", "0.5704269", "0.56987405", "0.568455", "0.56782544", "0.56663966", "0.56466436", "0.56395495", "0.5600726", "0.55900496", "0.5580071", "0.5573578", "0.5553044", "0.5550792" ]
0.67432487
9
Builds the architecture of the network
def _model_definition(self, net): # Input filtering and downsampling with max pooling print(net.shape) #channels must be specified first otherwise keras assumes channels last print('resnet17_scp') net = Conv2D( filters=128, kernel_size=5, activation=None, padding='same', data_format="channels_first", input_shape=(1, 100, 100))(net) net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels net = LeakyReLU()(net) net= MaxPooling2D(pool_size=(2,2))(net) net = Conv2D( filters=64, kernel_size=3, activation=None, padding='same', data_format="channels_first")(net) net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels net = LeakyReLU()(net) net= MaxPooling2D(pool_size=(2,2))(net) net = Conv2D( filters=64, kernel_size=3,activation=None, padding='same', data_format="channels_first")(net) net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels net = LeakyReLU()(net) net= MaxPooling2D(pool_size=(2,2))(net) return net
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_network(self):\n pass", "def build_network(self, inputs, targets, training=False):\n raise NotImplementedError", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n self._net_outputs = self.online_convnet(self.state_ph, training=True)\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n training=True)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)", "def make_architecture(self):\n self.arch = simple_mlp(num_inputs=self.p.model.num_inputs,\n num_outputs=self.p.model.num_outputs,\n params=self.p.model.arch)", "def __build_nn_architecture(self, dim_Y, hidden_dimensions):\n Y = tf.placeholder(tf.float32, shape=[None, dim_Y])\n config = tf.placeholder(tf.float32, shape=[None, len(self.knob_cols)])\n placeholders = [config, Y]\n weights, biases, outputs = self.__make_fc_layers(\n Y, dim_Y, hidden_dimensions, self.activations, trainable=[True] *\n (len(hidden_dimensions) + 1))\n architecture = [placeholders, weights, biases, outputs]\n return architecture", "def mgcNetArch(self, **kwargs):\n\n def_vals = {\"input_img_rows\" : self.input_img_rows,\n \"input_img_cols\" : self.input_img_cols,\n \"channels\" : self.channels,\n \"nb_classes\" : self.nb_classes,\n \"outLayer\" : 'gloAvg', \n \"l2_val\" : 0.00, \n \"net_architr\" : 'cnn_max', \n \"block_typex\" : 'basic', \n \"block_repeatx\" : [1, 1]\n }\n\n\n for k, v in def_vals.items():\n kwargs.setdefault(k, v)\n\n _input_img_rows = kwargs['input_img_rows']\n _input_img_cols = kwargs['input_img_cols']\n _channels = kwargs['channels']\n _nb_classes = kwargs['nb_classes']\n _outLayer = kwargs['outLayer']\n _l2_val = kwargs['l2_val']\n _net_architr = kwargs['net_architr']\n _block_typex = kwargs['block_typex']\n _block_repeatx = kwargs['block_repeatx']\n \n \n params = {\"input_img_rows\" : _input_img_rows,\n \"input_img_cols\" : _input_img_cols,\n \"channels\" : _channels,\n \"nb_classes\" : _nb_classes\n }\n \n print(_net_architr)\n if _net_architr == 'cnn_max':\n model = mgcNetArchMax(outLayer = _outLayer, l2_val = _l2_val, **params)\n \n elif _net_architr == 'cnn_stride':\n model = mgcNetArchStride2(outLayer = _outLayer, l2_val = _l2_val, **params)\n\n elif _net_architr == 'cnn_stride_mini':\n model = mgcNetArchStride2Mini(outLayer = _outLayer, l2_val = _l2_val, **params)\n\n elif _net_architr == 'common_cnn':\n model = mgcNetArchCommonCnn(outLayer = _outLayer, l2_val = _l2_val, **params)\n \n elif _net_architr == 'net_in_net':\n model = mgcNetArchNin(outLayer = _outLayer, l2_val = _l2_val, **params)\n \n elif _net_architr == 'resnet':\n model = mgcResnet(block_type = _block_typex, block_repeat = _block_repeatx, **params)\n \n elif _net_architr == 'resblock':\n model = mgcNetArchRes(outLayer = _outLayer, l2_val = _l2_val, **params)\n\n elif _net_architr == 'skipconnect':\n model = mgcNetArchSkip(outLayer = _outLayer, l2_val = _l2_val, **params)\n elif _net_architr == 'skipconnect_mini':\n model = mgcNetArchSkipMini(outLayer = _outLayer, l2_val = _l2_val, **params)\n \n self.model = model\n self.plot_model = SVG(model_to_dot(model, show_shapes = True).create(prog='dot', format='svg'))\n #self.model_summary = model.summary() \n \n return self", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 
'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def test_build_basic(self):\n # Get the components for a network\n data = array([[0, 1], [1, 0]])\n cdata = CData(data)\n encoder = BinaryEncoding(cdata)\n layer = ProductAnsatz(2)\n measure = Measurement(2, [0])\n\n # Make the network\n qnn = Network([encoder, layer, measure], computer=\"2q-qvm\")\n\n # Build each circuit for the network\n net0 = qnn._build(0)\n net1 = qnn._build(1)\n\n # Check that each circuit is a BaseAnsatz\n self.assertEqual(type(net0), BaseAnsatz)\n self.assertEqual(type(net1), BaseAnsatz)", "def build_net(nz=100):\n\tif opts.celeba:\n\t\tgen = get_gen_celebA(nz=nz)\n\t\tdis = get_dis_celebA(nz=nz)\n\n\tif opts.mnist:\n\t\tgen = get_gen_mnist(nz=nz)\n\t\tdis = get_dis_mnist(nz=nz)\n\n\treturn gen, dis", "def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_centers[i],\n self.node_radius,\n self.labels[i]\n )\n self.nodes.append(node)", "def build_network_definition(rsn_oms):\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"build_network_definition. 
rsn_oms class: %s\",\n rsn_oms.__class__.__name__)\n\n # platform types:\n platform_types = rsn_oms.config.get_platform_types()\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"got platform_types %s\", str(platform_types))\n\n # platform map:\n map = rsn_oms.config.get_platform_map()\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"got platform map %s\", str(map))\n\n # build topology:\n pnodes = NetworkUtil.create_node_network(map)\n dummy_root = pnodes['']\n root_pnode = pnodes[dummy_root.subplatforms.keys()[0]]\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"topology's root platform_id=%r\", root_pnode.platform_id)\n\n # now, populate the attributes and ports for the platforms\n\n def build_attributes_and_ports(pnode):\n \"\"\"\n Recursive routine to call set_attributes and set_ports on each pnode.\n \"\"\"\n set_attributes(pnode)\n set_ports(pnode)\n\n for sub_platform_id, sub_pnode in pnode.subplatforms.iteritems():\n build_attributes_and_ports(sub_pnode)\n\n def set_attributes(pnode):\n platform_id = pnode.platform_id\n attr_infos = rsn_oms.attr.get_platform_attributes(platform_id)\n if not isinstance(attr_infos, dict):\n raise PlatformDriverException(\n \"%r: get_platform_attributes returned: %s\" % (\n platform_id, attr_infos))\n\n if log.isEnabledFor(logging.TRACE):\n log.trace(\"%r: attr_infos: %s\", platform_id, attr_infos)\n\n if not platform_id in attr_infos:\n raise PlatformDriverException(\n \"%r: get_platform_attributes response does not \"\n \"include entry for platform_id: %s\" %(\n platform_id, attr_infos))\n\n ret_infos = attr_infos[platform_id]\n for attrName, attr_defn in ret_infos.iteritems():\n attr = AttrNode(attrName, attr_defn)\n pnode.add_attribute(attr)\n\n def set_ports(pnode):\n platform_id = pnode.platform_id\n port_infos = rsn_oms.port.get_platform_ports(platform_id)\n if not isinstance(port_infos, dict):\n raise PlatformDriverException(\n \"%r: get_platform_ports response is not a dict: %s\" % (\n platform_id, port_infos))\n\n if log.isEnabledFor(logging.TRACE):\n log.trace(\"%r: port_infos: %s\", platform_id, port_infos)\n\n if not platform_id in port_infos:\n raise PlatformDriverException(\n \"%r: get_platform_ports response does not include \"\n \"platform_id: %s\" % (platform_id, port_infos))\n\n ports = port_infos[platform_id]\n\n if not isinstance(ports, dict):\n raise PlatformDriverException(\n \"%r: get_platform_ports: entry for platform_id is \"\n \"not a dict: %s\" % (platform_id, ports))\n\n for port_id, dic in ports.iteritems():\n port = PortNode(port_id, dic['network'])\n port.set_state(dic['state'])\n pnode.add_port(port)\n\n # add connected instruments:\n instrs_res = rsn_oms.instr.get_connected_instruments(platform_id, port_id)\n if not isinstance(instrs_res, dict):\n log.warn(\"%r: port_id=%r: get_connected_instruments \"\n \"response is not a dict: %s\" % (platform_id, port_id, instrs_res))\n continue\n\n if log.isEnabledFor(logging.TRACE):\n log.trace(\"%r: port_id=%r: get_connected_instruments \"\n \"returned: %s\" % (platform_id, port_id, instrs_res))\n\n if not platform_id in instrs_res:\n raise PlatformDriverException(\n \"%r: port_id=%r: get_connected_instruments response\"\n \"does not have entry for platform_id: %s\" % (\n platform_id, ports))\n\n if not port_id in instrs_res[platform_id]:\n raise PlatformDriverException(\n \"%r: port_id=%r: get_connected_instruments response \"\n \"for platform_id does not have entry for port_id: %s\" % (\n platform_id, port_id, instrs_res[platform_id]))\n\n instr = 
instrs_res[platform_id][port_id]\n for instrument_id, attrs in instr.iteritems():\n port.add_instrument(InstrumentNode(instrument_id, attrs))\n\n # call the recursive routine\n build_attributes_and_ports(root_pnode)\n\n # we got our whole network including platform attributes and ports.\n\n # and finally create and return NetworkDefinition:\n ndef = NetworkDefinition()\n ndef._platform_types = platform_types\n ndef._pnodes = pnodes\n ndef._dummy_root = dummy_root\n return ndef", "def save_network_architecture(self,network_path):\n net_architecture = {}\n net_architecture['y_res'] = self.y_res\n net_architecture['x_res'] = self.x_res\n net_architecture['n_input_channels'] = self.n_input_channels\n net_architecture['n_output_classes'] = self.n_output_classes\n net_architecture['conv1_size'] = self.conv1_size\n net_architecture['conv1_n_chan'] = self.conv1_n_chan\n net_architecture['conv1_n_pool'] = self.conv1_n_pool\n net_architecture['conv2_size'] = self.conv2_size\n net_architecture['conv2_n_chan'] = self.conv2_n_chan\n net_architecture['conv2_n_pool'] = self.conv2_n_pool\n net_architecture['fc1_n_chan'] = self.fc1_n_chan\n net_architecture['fc1_dropout'] = self.fc1_dropout\n net_architecture['alpha'] = self.alpha\n net_architecture['n_samples_trained'] = self.n_samples_trained\n net_architecture['n_class_samples_trained'] = self.n_class_samples_trained\n net_architecture['n_samples_list'] = self.n_samples_list\n net_architecture['n_class_samples_list'] = self.n_class_samples_list\n net_architecture['accuracy_list'] = self.accuracy_list\n net_architecture['precision_list'] = self.precision_list\n net_architecture['recall_list'] = self.recall_list\n net_architecture['F1_list'] = self.F1_list\n np.save(os.path.join( \\\n network_path,'net_architecture.npy'), net_architecture)\n self.log(\"Network architecture saved to file:\\n{}\".format(\n os.path.join(network_path,'net_architecture.npy')))", "def build_net(nz=100):\n\tif opts.celeba:\n\t\tgen = get_wgen_celebA(nz=nz)\n\t\tdis = get_wdis_celebA(nz=nz)\n\n\tif opts.mnist:\n\t\tgen = get_wgen_mnist(nz=nz)\n\t\tdis = get_wdis_mnist(nz=nz)\n\n\treturn gen, dis", "def display_network_architecture(self):\n self.log(\"\\n-------- Network architecture --------\")\n self.log(\"y_res: {}\".format(self.y_res))\n self.log(\"x_res: {}\".format(self.x_res))\n self.log(\"n_input_channels: {}\".format(self.n_input_channels))\n self.log(\"n_output_classes: {}\".format(self.n_output_classes))\n self.log(\"conv1_size: {}\".format(self.conv1_size))\n self.log(\"conv1_n_chan: {}\".format(self.conv1_n_chan))\n self.log(\"conv1_n_pool: {}\".format(self.conv1_n_pool))\n self.log(\"conv2_size: {}\".format(self.conv2_size))\n self.log(\"conv2_n_chan: {}\".format(self.conv2_n_chan))\n self.log(\"conv2_n_pool: {}\".format(self.conv2_n_pool))\n self.log(\"fc1_n_chan: {}\".format(self.fc1_n_chan))\n self.log(\"fc1_dropout: {}\".format(self.fc1_dropout))\n self.log(\"alpha: {}\".format(self.alpha))\n self.log(\"n_samples_trained: {}\".format(self.n_samples_trained))\n for c in range(self.n_output_classes):\n self.log( \" * Class {}, m = {}\".format( \\\n c, self.n_class_samples_trained[c] ) )", "def build_network(config):\n network_cfg = config['network']\n\n network_name = network_cfg['name']\n\n network_params = list(inspect.signature(eval(network_name).__init__).parameters)[1:]\n\n args = [f'{param}={network_cfg[param]}' for param in network_params if network_cfg.get(param)]\n\n try:\n model = eval('{}({})'.format(network_name, ', '.join(args)))\n except:\n raise 
ValueError('Can\\'t load network.')\n\n return model.to(device='cuda')", "def _build_networks(self):\n # Calling online_convnet will generate a new graph as defined in\n # self._get_network_template using whatever input is passed, but will always\n # share the same weights.\n self.online_convnet = tf.make_template('Online', self._network_template)\n self.target_convnet = tf.make_template('Target', self._network_template)\n self._net_outputs = self.online_convnet(self.state_ph)\n\n self._replay_net_outputs = self.online_convnet(self._replay.states)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)\n\n if self.acting_policy == 'hyperbolic':\n self._q_argmax = tf.argmax(self._net_outputs.hyp_q_value, axis=1)[0]\n elif self.acting_policy == 'largest_gamma':\n self._q_argmax = tf.argmax(self._net_outputs.q_values[-1], axis=1)[0]\n else:\n raise NotImplementedError", "def build_net(nz=100):\n\tif opts.celeba:\n\t\tinput_gen, gen = get_bigan_gen_celebA(nz = nz)\n\t\tinput_enc, enc = get_bigan_enc_celebA(nz = nz)\n\t\tz_dis, x_dis, dis = get_bigan_dis_celebA(nz = nz)\n\n\tif opts.mnist:\n\t\tinput_gen, gen = get_bigan_gen_mnist(nz = nz)\n\t\tinput_enc, enc = get_bigan_enc_mnist(nz = nz)\n\t\tz_dis, x_dis, dis = get_bigan_dis_mnist(nz = nz)\n\n\treturn input_gen, gen, input_enc, enc, dis, z_dis, x_dis", "def build_network(self, dimList, actType=\"Tanh\", verbose=True):\n self.Q_network = Model(dimList, actType, verbose=verbose)\n self.target_network = Model(dimList, actType)\n\n if self.device == torch.device(\"cuda\"):\n self.Q_network.cuda()\n self.target_network.cuda()\n\n self.build_optimizer()", "def build_net(self, role, chk=None, chk_optimizer=None):\n\t\tlog.info('Building net')", "def save_network_architecture(self,network_path):\n net_architecture = {}\n net_architecture['y_res'] = self.y_res\n net_architecture['x_res'] = self.x_res\n net_architecture['n_input_channels'] = self.n_input_channels\n net_architecture['n_output_classes'] = self.n_output_classes\n net_architecture['fc1_n_chan'] = self.fc1_n_chan\n net_architecture['fc1_dropout'] = self.fc1_dropout\n net_architecture['alpha'] = self.alpha\n net_architecture['n_samples_trained'] = self.n_samples_trained\n net_architecture['n_class_samples_trained'] = self.n_class_samples_trained\n net_architecture['n_samples_list'] = self.n_samples_list\n net_architecture['n_class_samples_list'] = self.n_class_samples_list\n net_architecture['accuracy_list'] = self.accuracy_list\n net_architecture['precision_list'] = self.precision_list\n net_architecture['recall_list'] = self.recall_list\n net_architecture['F1_list'] = self.F1_list\n np.save(os.path.join( \\\n network_path,'net_architecture.npy'), net_architecture)\n self.log(\"Network architecture saved to file:\\n{}\".format(\n os.path.join(network_path,'net_architecture.npy')))", "def build(self):\n\n LOG.debug('-' * 80)\n LOG.debug(\"build\")\n LOG.debug('-' * 80)\n for b in self._bridges:\n bridge = b['bridge']\n # TODO(tomohiko) Need to something when not bridge['provided']?\n if bridge['provided']:\n LOG.info('Skipped building bridge=%r', bridge)\n\n for h in self._hosts:\n host = h['host']\n if host.get('tunnel_zone'):\n tz_data = host.get('tunnel_zone')\n tzs = self._api.get_tunnel_zones()\n\n # Ensure that TZ exists\n tz = [t for t in tzs if t.get_name() == tz_data['name']]\n if tz == []:\n if is_vxlan_enabled():\n tz = self._api.add_vxlan_tunnel_zone()\n else:\n tz = self._api.add_gre_tunnel_zone()\n tz.name(tz_data['name'])\n tz.create()\n 
else:\n tz = tz[0]\n\n # Ensure that the host is in the TZ\n tz_hosts = tz.get_hosts()\n tz_host = filter(\n lambda x: x.get_host_id() == host['mn_host_id'],\n tz_hosts)\n if tz_host == []:\n tz_host = tz.add_tunnel_zone_host()\n tz_host.ip_address(tz_data['ip_addr'])\n tz_host.host_id(host['mn_host_id'])\n tz_host.create()\n\n\n if host['provided'] == True:\n LOG.info('Skipped building host=%r', host)\n else:\n #TODO(tomoe): when we support provisioning Midolman host with\n # this tool.\n pass\n interfaces = host['interfaces']\n\n futures = []\n for i in interfaces:\n iface = Interface(i['interface'], host)\n self._interfaces[(host['id'], i['interface']['id'])] = iface\n f = iface.create()\n futures.append(f)\n\n wait_on_futures(futures)\n\n LOG.debug('-' * 80)\n LOG.debug(\"end build\")\n LOG.debug('-' * 80)", "def display_network_architecture(self):\n self.log(\"\\n-------- Network architecture --------\")\n self.log(\"y_res: {}\".format(self.y_res))\n self.log(\"x_res: {}\".format(self.x_res))\n self.log(\"n_input_channels: {}\".format(self.n_input_channels))\n self.log(\"n_output_classes: {}\".format(self.n_output_classes))\n self.log(\"fc1_n_chan: {}\".format(self.fc1_n_chan))\n self.log(\"fc1_dropout: {}\".format(self.fc1_dropout))\n self.log(\"alpha: {}\".format(self.alpha))\n self.log(\"n_samples_trained: {}\".format(self.n_samples_trained))\n for c in range(self.n_output_classes):\n self.log( \" * Class {}, m = {}\".format( \\\n c, self.n_class_samples_trained[c] ) )", "def build_graph(self, graph, inst_name, port_nets):\n return", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def display_network_architecture(self):\n self.log(\"\\n-------- Network architecture --------\")\n self.log(\"y_res: {}\".format(self.y_res))\n self.log(\"x_res: {}\".format(self.x_res))\n self.log(\"n_input_channels: {}\".format(self.n_input_channels))\n self.log(\"n_output_classes: {}\".format(self.n_output_classes))\n self.log(\"input_dropout: {}\".format(self.fc1_dropout))\n self.log(\"alpha: {}\".format(self.alpha))\n self.log(\"n_samples_trained: {}\".format(self.n_samples_trained))\n for c in range(self.n_output_classes):\n self.log( \" * Class {}, m = {}\".format( \\\n c, self.n_class_samples_trained[c] ) )", "def buildNetwork(self):\n\n # create the network node for our module\n self.networkNode = cmds.createNode(\"network\", name=self.modName)\n\n # create attributes\n self.addAttributes()\n\n return self.networkNode", "def construct_from_anatomy(self, anet, architecture):\n # construct conv layer for input -> LGNd\n self.area_channels['input'] = INPUT_SIZE[0]\n self.area_size['input'] = INPUT_SIZE[1]\n \n out_sigma = 1\n out_channels = np.floor(anet.find_layer('LGNd','').num/out_sigma/INPUT_SIZE[1]/INPUT_SIZE[2])\n architecture.set_num_channels('LGNd', '', out_channels)\n self.area_channels['LGNd'] = out_channels\n \n out_size = INPUT_SIZE[1] * out_sigma\n self.area_size['LGNd'] = out_size\n \n convlayer = ConvLayer('input', 'LGNd',\n ConvParam(in_channels=INPUT_SIZE[0], \n out_channels=out_channels,\n gsh=INPUT_GSH,\n gsw=INPUT_GSW, out_sigma=out_sigma),\n out_size)\n self.layers.append(convlayer)\n \n # construct conv layers for all other connections\n G, _ = anet.make_graph()\n Gtop = nx.topological_sort(G)\n root = next(Gtop) # get root of graph\n for i, e in enumerate(nx.edge_bfs(G, root)):\n \n 
in_layer_name = e[0].area+e[0].depth\n out_layer_name = e[1].area+e[1].depth\n print('constructing layer %s: %s to %s'%(i, in_layer_name, out_layer_name))\n \n in_conv_layer = self.find_conv_target_area(in_layer_name)\n in_size = in_conv_layer.out_size\n in_channels = in_conv_layer.params.out_channels\n \n out_anat_layer = anet.find_layer(e[1].area, e[1].depth)\n \n out_sigma = get_out_sigma(e[0].area, e[0].depth, e[1].area, e[1].depth)\n out_size = in_size * out_sigma\n self.area_size[e[1].area+e[1].depth] = out_size\n\n if SUBFIELDS:\n pixel_area = calculate_pixel_area_with_visual_field(architecture, e[1].area, e[1].depth)\n out_channels = np.floor(out_anat_layer.num / pixel_area)\n else:\n out_channels = np.floor(out_anat_layer.num/out_size**2)\n\n architecture.set_num_channels(e[1].area, e[1].depth, out_channels)\n self.area_channels[e[1].area+e[1].depth] = out_channels\n \n convlayer = ConvLayer(in_layer_name, out_layer_name, \n ConvParam(in_channels=in_channels, \n out_channels=out_channels,\n gsh=architecture.get_kernel_peak_probability(e[0].area, e[0].depth, e[1].area, e[1].depth),\n gsw=architecture.get_kernel_width_pixels(e[0].area, e[0].depth, e[1].area, e[1].depth), out_sigma=out_sigma),\n out_size)\n \n self.layers.append(convlayer)", "def save_network_architecture(self,network_path):\n net_architecture = {}\n net_architecture['y_res'] = self.y_res\n net_architecture['x_res'] = self.x_res\n net_architecture['n_input_channels'] = self.n_input_channels\n net_architecture['n_output_classes'] = self.n_output_classes\n net_architecture['fc1_dropout'] = self.fc1_dropout\n net_architecture['alpha'] = self.alpha\n net_architecture['n_samples_trained'] = self.n_samples_trained\n net_architecture['n_class_samples_trained'] = self.n_class_samples_trained\n net_architecture['n_samples_list'] = self.n_samples_list\n net_architecture['n_class_samples_list'] = self.n_class_samples_list\n net_architecture['accuracy_list'] = self.accuracy_list\n net_architecture['precision_list'] = self.precision_list\n net_architecture['recall_list'] = self.recall_list\n net_architecture['F1_list'] = self.F1_list\n np.save(os.path.join( \\\n network_path,'net_architecture.npy'), net_architecture)\n self.log(\"Network architecture saved to file:\\n{}\".format(\n os.path.join(network_path,'net_architecture.npy')))", "def build_model(self):\n for link in self.links:\n # if from neuron is input to graph, add it to input_neurons set\n if self.is_input_neuron(link.from_neuron_id):\n self.input_neurons.add(link.from_neuron_id)\n # add weight to neuron\n if link.to_neuron_id not in self.weights:\n self.weights[link.to_neuron_id] = []\n self.weights[link.to_neuron_id].append(link.weight)\n # add input to neuron\n if link.to_neuron_id not in self.connections:\n self.connections[link.to_neuron_id] = []\n self.connections[link.to_neuron_id].append(link.from_neuron_id)", "def build(self):\n self.build_inputs()\n self.build_image_embeddings()\n self.build_seq_embeddings()\n self.build_encoder()\n self.build_prediction_model()\n self.setup_encoder_initializer()\n self.setup_global_step()\n self.list_trainable_variables()", "def _build_model(self):\n\n with tf.variable_scope(\"Matchnet\", reuse=tf.AUTO_REUSE):\n # For determining the runtime shape\n x_shp = tf.shape(self.x_in)\n\n # -------------------- Network archintecture --------------------\n # Build graph\n print(\"Building Graph\")\n self.logits = build_graph(self.x_in, self.is_training, self.config)\n # 
---------------------------------------------------------------\n\n # Turn into weights for each sample\n weights = tf.nn.relu(tf.tanh(self.logits))\n\n # Make input data (num_img_pair x num_corr x 4)\n xx = tf.transpose(tf.reshape(\n self.x_in, (x_shp[0], x_shp[2], 4)), (0, 2, 1))\n\n # Create the matrix to be used for the eight-point algorithm\n X = tf.transpose(tf.stack([\n xx[:, 2] * xx[:, 0], xx[:, 2] * xx[:, 1], xx[:, 2],\n xx[:, 3] * xx[:, 0], xx[:, 3] * xx[:, 1], xx[:, 3],\n xx[:, 0], xx[:, 1], tf.ones_like(xx[:, 0])\n ], axis=1), (0, 2, 1))\n print(\"X shape = {}\".format(X.shape))\n wX = tf.reshape(weights, (x_shp[0], x_shp[2], 1)) * X\n print(\"wX shape = {}\".format(wX.shape))\n XwX = tf.matmul(tf.transpose(X, (0, 2, 1)), wX)\n print(\"XwX shape = {}\".format(XwX.shape))\n\n # Recover essential matrix from self-adjoing eigen\n e, v = tf.self_adjoint_eig(XwX)\n self.e_hat = tf.reshape(v[:, :, 0], (x_shp[0], 9))\n # Make unit norm just in case\n self.e_hat /= tf.norm(self.e_hat, axis=1, keep_dims=True)", "def build_graph(self):\n if self.model == 'dense':\n # ForecastNet with two densely connected hidden layers in a cell and Mixture Density Network outputs\n self.outputs, self.mu, self.sigma, self.cost = forecastnet_graph(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'conv':\n # ForecastNet with a convlutional neural network in a cell and Mixture Density Network outputs\n self.outputs, self.mu, self.sigma, self.cost = forecastnet_conv_graph(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'dense2':\n # ForecastNet with two densely connected hidden layers in a cell and linear outputs\n self.outputs, self.cost = forecastnet_graph2(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'conv2':\n # ForecastNet with a convolutional neural network in a cell and linear outputs\n self.outputs, self.cost = forecastnet_conv_graph2(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)", "def build_graph(self):\n n_classes = self.n_classes\n\n (self.feed('data')\n .conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)\n .conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)\n .conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3'))\n\n self.compute_rDeRF() # dummy\n\n # Classification\n (self.feed('conv5_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool6')\n .reshape(shape=(-1, 7, 7, 512), name='pool6_reshape')\n .fc(4096, name='fc6')\n .dropout(0.5, name='drop6')\n .fc(4096, name='fc7')\n .dropout(0.5, name='drop7')\n # .make_time(name='drop7_reduced')\n .fc(n_classes, relu=False, name='cls_score')\n .softmax(name='cls_prob'))\n pass", "def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n 
self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def compile(self):\n logger.info('Define network with dnnet of version : %s'\\\n % dnnet.__version__)\n if self.layers.size == 0:\n msg = 'NeuralNetwork has no layer.\\n Add layers before compiling.'\n raise DNNetRuntimeError(msg)\n\n parent = self.layers[0]\n self.add(OutputLayer())\n\n for i, layer in enumerate(self.layers, 1):\n logger.debug('Add %s layer.' % layer.get_type())\n layer.set_parent(parent)\n parent = layer\n\n logger.debug('Defined network.')", "def _compile_networks(self):\n\n _header_ = self._header_ + '_compile_networks(): '\n\n if self.verbose:\n print(_header_ + 'Compiling all networks ...')\n\n networks = []\n\n all_nidx = set(self.nidx2lidx.keys())\n\n while all_nidx:\n\n nidx0 = [all_nidx.pop()]\n network = set(nidx0)\n\n while nidx0 and all_nidx:\n\n nidx = set()\n\n for l in nidx0:\n lidx = self.nidx2lidx[l]\n for n in lidx:\n nidx |= self.lidx2nidx[n]\n\n nidx -= network\n network |= nidx\n all_nidx -= nidx\n nidx0 = nidx.copy()\n\n networks.append(network)\n\n if self.verbose:\n print(_header_ + 'Found %d networks' % len(networks))\n for i, network in enumerate(networks):\n print(' Network %d - %s' % (i, ','.join([str(j) for j in network])))\n\n return networks", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def build_topology(self):\n# errstr = \"build_topology() is not implemented.\\n\"\n# errstr += textwrap.dedent(self.build_topology.__doc__)\n# raise NotImplementedError(errstr)\n pass # May be a 1-compartment neuron. No need to abstract. ", "def _keras_build_fn(architecture=None, prediction_periods=1):\n \n # List of optimizers that can be specified in the architecture\n optimizers = ['Adadelta', 'Adagrad', 'Adam', 'Adamax', 'Nadam', 'RMSprop', 'SGD']\n \n # The model definition should contain at least one layer and the compilation parameters\n if architecture is None or len(architecture) < 2:\n err = \"Invalid Keras architecture. Expected at least one layer and compilation parameters.\"\n raise Exception(err)\n # The last row of the model definition should contain compilation parameters\n elif not architecture.iloc[-1,0].capitalize() in ['Compile', 'Compilation']:\n err = \"Invalid Keras architecture. The last row of the model definition should provide 'Compile' parameters.\"\n raise Exception(err)\n \n # sys.stdout.write(\"Architecture Data Frame in _keras_build_fn:\\n{}\\n\\n\".format(architecture.to_string()))\n\n neural_net = keras.models.Sequential()\n\n for i in architecture.index:\n # Name items in the row for easy access\n name, args, kwargs = architecture.iloc[i,0], architecture.iloc[i,1], architecture.iloc[i,2]\n\n # The last row of the DataFrame should provide compilation keyword arguments\n if i == max(architecture.index):\n # Check if an optimizer with custom parameters has been defined\n try:\n kwargs = kwargs.copy() # Copy so that we don't modify the architecture dataframe\n kwargs['optimizer'] = opt\n except UnboundLocalError:\n pass\n \n # Compile the model\n neural_net.compile(**kwargs)\n # Watch out for a row providing optimizer parameters\n elif name in optimizers:\n opt = getattr(keras.optimizers, name)(**kwargs) \n # All other rows of the DataFrame define the model architecture\n else:\n # Check if the name includes a layer wrapper e.g. 
TimeDistributed Dense\n names = name.split(' ')\n if len(names) == 2:\n wrapper = names[0]\n name = names[1]\n \n # Get wrapper kwargs\n wrapper_kwargs = dict()\n if 'merge_mode' in kwargs:\n wrapper_kwargs['merge_mode'] = kwargs.pop('merge_mode')\n else:\n wrapper = None\n\n # Create a keras layer of the required type with the provided positional and keyword arguments\n layer = getattr(keras.layers, name)(*args, **kwargs)\n\n if wrapper:\n # Create the layer wrapper\n wrapper = getattr(keras.layers, wrapper)(layer, **wrapper_kwargs)\n # Add the layer wrapper to the model\n neural_net.add(wrapper) \n else:\n # Add the layer to the model\n neural_net.add(layer)\n \n # Get the number of nodes for the final layer\n output_features = neural_net.layers[-1].get_config()['units']\n assert prediction_periods == output_features, \"The number of nodes in the final layer of the network must match the prediction_periods execution argument. Expected {} nodes but got {}.\".format(prediction_periods, output_features)\n \n return neural_net", "def _doNetInstallBuild(self, farbconfig):\n print \"Building network installation root ...\"\n try:\n ibr = runner.NetInstallAssemblerRunner(farbconfig)\n ibr.run()\n print \"Network installation root created.\"\n except runner.NetInstallAssemblerRunnerError, e:\n print >>sys.stderr, e\n sys.exit(1)", "def _generate_network_initialization(self, graph, memory_manager):\n\n # TODO: To be changed if we want to support multiple outputs\n output_buffer_name = graph.outputs[0].name\n\n ops_to_ignore = ['Reshape', 'Mul']\n\n buffers_allocated = []\n\n buffer_declaration = \"\"\n buffer_declaration += \" pico_cnn::naive::Tensor **kernels;\\n\"\n buffer_declaration += \" pico_cnn::naive::Tensor **biases;\\n\"\n\n constructor_code = \"\"\n #constructor_code += \"Network::Network() {\\n\\n\"\n\n num_layers = 0\n num_kernels = 0\n num_biases = 0\n\n for node in graph.nodes:\n \"\"\"Do not count the reshape layers as the input tensor will only define the dimensions\"\"\"\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n num_layers += 1\n for num, input in enumerate(node.input_tensors):\n if input in buffers_allocated:\n continue\n else:\n tensor = node.input_tensors[input]\n buffers_allocated.append(input)\n if len(tensor.shape) == 1:\n num_biases += 1\n else:\n num_kernels += 1\n\n \"\"\"The arrays kernels and biases will be used to pass only two variables to read_binary_weights\"\"\"\n constructor_code += \" kernels = new pico_cnn::naive::Tensor*[{}]();\\n\".format(num_kernels)\n constructor_code += \" biases = new pico_cnn::naive::Tensor*[{}]();\\n\\n\".format(num_biases)\n\n pos = -1\n pos_kernel = -1\n pos_bias = -1\n\n buffers_allocated.clear()\n\n \"\"\"Iterate over all nodes in the graph and generate the corresponding allocation code.\"\"\"\n for node_id, node in enumerate(graph.nodes):\n\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n pos += 1\n\n buffer_declaration += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n constructor_code += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n\n # Allocate memory for kernels and biases\n buffer_declaration += \" // Inputs\\n\"\n constructor_code += \" // Inputs\\n\"\n for num, input in enumerate(node.input_tensors):\n\n if node.op_type in ops_to_ignore:\n continue\n\n if input in buffers_allocated:\n continue\n else:\n buffers_allocated.append(input)\n\n tensor = node.input_tensors[input]\n if len(tensor.shape) == 1:\n pos_bias 
+= 1\n else:\n pos_kernel += 1\n\n buffer = memory_manager.get_buffer(graph, input)\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"KernelAllocation\")\n impl = functionality[0].create(buffer, pos, pos_kernel, pos_bias)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \" // Outputs\\n\"\n constructor_code += \" // Outputs\\n\"\n for num, output in enumerate(node.outputs):\n\n buffer = memory_manager.get_buffer(graph, output)\n\n if output == output_buffer_name:\n buffer_declaration += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n constructor_code += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n continue\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"OutputAllocation\")\n impl = functionality[0].create(buffer)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \"\\n\\n\"\n constructor_code += \"\\n\\n\"\n\n #constructor_code += \"}\\n\"\n\n self.buffer_declaration = buffer_declaration\n self.constructor_code = constructor_code", "def __init__(self, network_path='.', logging=True,\n input_image_size=None, n_input_channels=None,\n n_output_classes=None,\n fc1_n_chan=1024, fc1_dropout=0.5, alpha=4e-4 ):\n self.logging = logging\n\n # If network path does not yet exists\n self.network_path = network_path\n if not os.path.isdir(self.network_path):\n # Make network directory\n os.mkdir(self.network_path)\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Creation of new network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log(\"\\nNetwork did not exist ... 
\")\n self.log(\"Created new network with supplied (or default) architecture\")\n\n # Set up new network\n self.y_res = input_image_size[0]\n self.x_res = input_image_size[1]\n self.n_input_channels = n_input_channels\n self.n_output_classes = n_output_classes\n self.fc1_n_chan = fc1_n_chan\n self.fc1_dropout = fc1_dropout\n self.alpha = alpha\n self.n_samples_trained = 0\n self.n_class_samples_trained = self.n_output_classes*[0]\n self.n_samples_list = []\n self.n_class_samples_list = [[] for _ in range(self.n_output_classes)]\n self.accuracy_list = [[] for _ in range(self.n_output_classes)]\n self.precision_list = [[] for _ in range(self.n_output_classes)]\n self.recall_list = [[] for _ in range(self.n_output_classes)]\n self.F1_list = [[] for _ in range(self.n_output_classes)]\n\n # Save network architecture\n self.save_network_architecture( network_path=self.network_path )\n\n else:\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Re-initialization of existing network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \" \")\n\n # Load network architecture from directory\n net_architecture = self.load_network_architecture(self.network_path)\n\n # Set up network variables from loaded architecture\n self.y_res = net_architecture['y_res']\n self.x_res = net_architecture['x_res']\n self.n_input_channels = net_architecture['n_input_channels']\n self.n_output_classes = net_architecture['n_output_classes']\n self.fc1_n_chan = net_architecture['fc1_n_chan']\n self.fc1_dropout = net_architecture['fc1_dropout']\n self.alpha = net_architecture['alpha']\n self.n_samples_trained = net_architecture['n_samples_trained']\n self.n_class_samples_trained = net_architecture['n_class_samples_trained']\n self.n_samples_list = net_architecture['n_samples_list']\n self.n_class_samples_list = net_architecture['n_class_samples_list']\n self.accuracy_list = net_architecture['accuracy_list']\n self.precision_list = net_architecture['precision_list']\n self.recall_list = net_architecture['recall_list']\n self.F1_list = net_architecture['F1_list']\n\n # Update values of alpha and dropout if supplied\n if self.alpha != alpha:\n self.alpha = alpha\n self.log(\"Updated learning rate 'alpha' to {}\".format(self.alpha))\n if self.fc1_dropout != fc1_dropout:\n self.fc1_dropout = fc1_dropout\n self.log(\"Updated dropout fraction to {}\".format(self.fc1_dropout))\n\n # Clear previous graphs\n tf.reset_default_graph()\n\n #########################################################\n # Input and target variable placeholders\n # x = [ m_samples x [channel_1_data, channel_2_data, etc.] 
]\n self.x = tf.placeholder( tf.float32, shape = [None,\n self.n_input_channels * self.y_res * self.x_res] )\n self.y_trgt = tf.placeholder( tf.float32, \\\n shape = [None, self.n_output_classes] )\n\n #########################################################\n # Densely Connected Layer\n # Weights and bias\n self.fc1_shape = \\\n [self.y_res * self.x_res * self.n_input_channels,\n self.fc1_n_chan]\n self.W_fc1 = tf.Variable( tf.truncated_normal(\n shape=self.fc1_shape, stddev=0.1 ) )\n self.b_fc1 = tf.Variable( tf.constant(0.1, shape=[self.fc1_n_chan] ))\n\n # Calculate network step\n self.fc1_relu = tf.nn.relu( tf.matmul( self.x,\n self.W_fc1) + self.b_fc1 )\n\n # Set up dropout option for fc1\n self.fc1_keep_prob = tf.placeholder(tf.float32)\n self.fc1_relu_drop = tf.nn.dropout(self.fc1_relu, self.fc1_keep_prob)\n\n #########################################################\n # Readout layer\n # Weights and bias\n self.fc_out_shape = [self.fc1_n_chan, self.n_output_classes]\n self.W_fc_out = tf.Variable( tf.truncated_normal(\n shape=self.fc_out_shape, stddev=0.1 ) )\n self.b_fc_out = tf.Variable( tf.constant(0.1,\n shape=[self.fc_out_shape[1]] ))\n\n # Calculate network step\n self.fc_out_lin = tf.matmul( self.fc1_relu_drop,\n self.W_fc_out ) + self.b_fc_out\n\n #########################################################\n # Define cost function and optimizer algorithm\n self.cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.fc_out_lin, labels=self.y_trgt ) )\n self.train_step = tf.train.AdamOptimizer(self.alpha).minimize(\n self.cross_entropy )\n\n #########################################################\n # Define how to test trained model\n self.network_prediction = tf.cast( tf.argmax(\n self.fc_out_lin, 1 ), tf.float32 )\n self.is_correct_prediction = tf.equal( tf.argmax( self.fc_out_lin, 1 ),\n tf.argmax( self.y_trgt, 1 ) )\n self.accuracy = tf.reduce_mean( tf.cast(\n self.is_correct_prediction, tf.float32 ) )\n\n #########################################################\n # Create save operation\n self.saver = tf.train.Saver()", "def _build_network(self):\n self.new_trainable_variable(\"w0_sin\", np.zeros(\n (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n self.new_trainable_variable(\"b0_sin\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n self.new_trainable_variable(\"w1_sin\", np.zeros(\n (HIDDEN_LAYER_UNITS, config.oscillators), dtype=np.float64))\n self.new_trainable_variable(\"b1_sin\", np.zeros(config.oscillators, dtype=np.float64))\n\n self.new_trainable_variable(\"w0_cos\", np.zeros(\n (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n self.new_trainable_variable(\"b0_cos\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n self.new_trainable_variable(\"w1_cos\", np.zeros(\n (HIDDEN_LAYER_UNITS, config.oscillators), dtype=np.float64))\n self.new_trainable_variable(\"b1_cos\", np.zeros(config.oscillators, dtype=np.float64))\n\n def action_infer(state: np.array) -> np.array:\n \"\"\"\n Get state and return feedback.\n\n state: [f_0, f_1, ..., phi_0, phi_1, ..., t_0, t_1, ...]\n return: [phase_feedback0, phase_feedback1, ..., angle_range0, angle_range1, ...]\n\n Discrepancy for torsion spring = alpha / 2 * k * range * T * sin(phi_i)\n \"\"\"\n forces = state[:config.somites]\n phis = state[config.somites:config.somites + config.oscillators]\n tensions = state[config.somites + config.oscillators:]\n\n f_sin, f_cos = self._calc_fs(np.concatenate((forces, tensions)))\n discrepancies = -0.5 * 
config.caterpillar_params[\"vertical_ts_k\"] * config.caterpillar_params[\"realtime_tunable_ts_rom\"] * tensions * np.sin(phis)\n return f_sin * np.sin(phis) + f_cos * np.cos(phis) - self.get_discrep_coeffs() * discrepancies, np.ones(config.oscillators) * config.caterpillar_params[\"realtime_tunable_ts_rom\"]\n\n return action_infer", "def architecture(self):\n return self.random.choice([\n 'x86_64', \n 'x86'\n ])", "def __init__(self, network_path='.', logging=True,\n input_image_size=None, n_input_channels=None,\n n_output_classes=None,\n conv1_size=5, conv1_n_chan=32, conv1_n_pool=2,\n conv2_size=5, conv2_n_chan=64, conv2_n_pool=2,\n fc1_n_chan=1024, fc1_dropout=0.5, alpha=4e-4 ):\n self.logging = logging\n\n # If network path does not yet exists\n self.network_path = network_path\n if not os.path.isdir(self.network_path):\n # Make network directory\n os.mkdir(self.network_path)\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Creation of new network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log(\"\\nNetwork did not exist ... \")\n self.log(\"Created new network with supplied (or default) architecture\")\n\n # Set up new network\n self.y_res = input_image_size[0]\n self.x_res = input_image_size[1]\n self.n_input_channels = n_input_channels\n self.n_output_classes = n_output_classes\n self.conv1_size = conv1_size\n self.conv1_n_chan = conv1_n_chan\n self.conv1_n_pool = conv1_n_pool\n self.conv2_size = conv2_size\n self.conv2_n_chan = conv2_n_chan\n self.conv2_n_pool = conv2_n_pool\n self.fc1_y_size = int( np.ceil( np.ceil(\n self.y_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_x_size = int( np.ceil( np.ceil(\n self.x_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_n_chan = fc1_n_chan\n self.fc1_dropout = fc1_dropout\n self.alpha = alpha\n self.n_samples_trained = 0\n self.n_class_samples_trained = self.n_output_classes*[0]\n self.n_samples_list = []\n self.n_class_samples_list = [[] for _ in range(self.n_output_classes)]\n self.accuracy_list = [[] for _ in range(self.n_output_classes)]\n self.precision_list = [[] for _ in range(self.n_output_classes)]\n self.recall_list = [[] for _ in range(self.n_output_classes)]\n self.F1_list = [[] for _ in range(self.n_output_classes)]\n\n # Save network architecture\n self.save_network_architecture( network_path=self.network_path )\n\n else:\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Re-initialization of existing network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \" \")\n\n # Load network architecture from directory\n net_architecture = self.load_network_architecture(self.network_path)\n\n # Set up network variables from loaded architecture\n self.y_res = net_architecture['y_res']\n self.x_res = net_architecture['x_res']\n self.n_input_channels = net_architecture['n_input_channels']\n self.n_output_classes = net_architecture['n_output_classes']\n self.conv1_size = net_architecture['conv1_size']\n self.conv1_n_chan = net_architecture['conv1_n_chan']\n self.conv1_n_pool = net_architecture['conv1_n_pool']\n self.conv2_size = net_architecture['conv2_size']\n self.conv2_n_chan = 
net_architecture['conv2_n_chan']\n self.conv2_n_pool = net_architecture['conv2_n_pool']\n self.fc1_y_size = int( np.ceil( np.ceil(\n self.y_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_x_size = int( np.ceil( np.ceil(\n self.x_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_n_chan = net_architecture['fc1_n_chan']\n self.fc1_dropout = net_architecture['fc1_dropout']\n self.alpha = net_architecture['alpha']\n self.n_samples_trained = net_architecture['n_samples_trained']\n self.n_class_samples_trained = net_architecture['n_class_samples_trained']\n self.n_samples_list = net_architecture['n_samples_list']\n self.n_class_samples_list = net_architecture['n_class_samples_list']\n self.accuracy_list = net_architecture['accuracy_list']\n self.precision_list = net_architecture['precision_list']\n self.recall_list = net_architecture['recall_list']\n self.F1_list = net_architecture['F1_list']\n\n # Update values of alpha and dropout if supplied\n if self.alpha != alpha:\n self.alpha = alpha\n self.log(\"Updated learning rate 'alpha' to {}\".format(self.alpha))\n if self.fc1_dropout != fc1_dropout:\n self.fc1_dropout = fc1_dropout\n self.log(\"Updated dropout fraction to {}\".format(self.fc1_dropout))\n\n # Clear previous graphs\n tf.reset_default_graph()\n\n #########################################################\n # Input and target variable placeholders\n # x = [ m_samples x [channel_1_data, channel_2_data, etc.] ]\n self.x = tf.placeholder( tf.float32, shape = [None,\n self.n_input_channels * self.y_res * self.x_res] )\n self.y_trgt = tf.placeholder( tf.float32, \\\n shape = [None, self.n_output_classes] )\n\n # Convert input image to tensor with channel as last dimension\n # x_image = [-1 x im-height x im-width x n-input-channels]\n x_image_temp = tf.reshape(self.x, [-1,\n self.n_input_channels,self.y_res,self.x_res])\n x_image = tf.transpose(x_image_temp, [0,2,3,1])\n\n #########################################################\n # Set up convolutional layer 1\n # W = [im-height x im-width x n-input-channels x n-output-channels])\n self.conv1_shape = [self.conv1_size, self.conv1_size,\n self.n_input_channels, self.conv1_n_chan]\n self.W_conv1 = tf.Variable( tf.truncated_normal(\n shape=self.conv1_shape, stddev=0.1))\n self.b_conv1 = tf.Variable( tf.constant(0.1,\n shape=[self.conv1_n_chan] ))\n\n # Convolve x_image with the weight tensor\n self.conv1_lin = tf.nn.conv2d( x_image, self.W_conv1,\n strides=[1, 1, 1, 1], padding='SAME' )\n\n # Add bias and apply transfer function\n self.conv1_relu = tf.nn.relu( self.conv1_lin + self.b_conv1 )\n\n # Max pooling\n self.conv1_kernel = [1, self.conv1_n_pool, self.conv1_n_pool, 1]\n self.conv1_pool = tf.nn.max_pool( self.conv1_relu,\n ksize=self.conv1_kernel, strides=self.conv1_kernel, padding='SAME')\n\n #########################################################\n # Convolutional layer 2\n self.conv2_shape = [self.conv2_size, self.conv2_size,\n self.conv1_n_chan, self.conv2_n_chan]\n self.W_conv2 = tf.Variable( tf.truncated_normal(\n shape=self.conv2_shape, stddev=0.1 ) )\n self.b_conv2 = tf.Variable( tf.constant(0.1,\n shape=[self.conv2_n_chan] ))\n\n # Convolve x_image with the weight tensor\n self.conv2_lin = tf.nn.conv2d( self.conv1_pool, self.W_conv2,\n strides=[1, 1, 1, 1], padding='SAME' )\n\n # Add bias and apply transfer function\n self.conv2_relu = tf.nn.relu( self.conv2_lin + self.b_conv2 )\n\n # Max pooling\n self.conv2_kernel = [1, self.conv2_n_pool, self.conv2_n_pool, 1]\n self.conv2_pool = tf.nn.max_pool( 
self.conv2_relu,\n ksize=self.conv2_kernel, strides=self.conv2_kernel, padding='SAME')\n\n\n #########################################################\n # Densely Connected Layer\n # Weights and bias\n self.fc1_shape = [self.fc1_y_size * self.fc1_x_size * self.conv2_n_chan,\n self.fc1_n_chan]\n self.W_fc1 = tf.Variable( tf.truncated_normal(\n shape=self.fc1_shape, stddev=0.1 ) )\n self.b_fc1 = tf.Variable( tf.constant(0.1, shape=[self.fc1_n_chan] ))\n\n # Flatten output from conv2\n self.conv2_pool_flat = tf.reshape(\n self.conv2_pool, [-1, self.fc1_shape[0]] )\n\n # Calculate network step\n self.fc1_relu = tf.nn.relu( tf.matmul( self.conv2_pool_flat,\n self.W_fc1) + self.b_fc1 )\n\n # Set up dropout option for fc1\n self.fc1_keep_prob = tf.placeholder(tf.float32)\n self.fc1_relu_drop = tf.nn.dropout(self.fc1_relu, self.fc1_keep_prob)\n\n #########################################################\n # Readout layer\n # Weights and bias\n self.fc_out_shape = [self.fc1_n_chan, self.n_output_classes]\n self.W_fc_out = tf.Variable( tf.truncated_normal(\n shape=self.fc_out_shape, stddev=0.1 ) )\n self.b_fc_out = tf.Variable( tf.constant(0.1,\n shape=[self.fc_out_shape[1]] ))\n\n # Calculate network step\n self.fc_out_lin = tf.matmul( self.fc1_relu_drop,\n self.W_fc_out ) + self.b_fc_out\n\n #########################################################\n # Define cost function and optimizer algorithm\n self.cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.fc_out_lin, labels=self.y_trgt ) )\n self.train_step = tf.train.AdamOptimizer(self.alpha).minimize(\n self.cross_entropy )\n\n #########################################################\n # Define how to test trained model\n self.network_prediction = tf.cast( tf.argmax(\n self.fc_out_lin, 1 ), tf.float32 )\n self.is_correct_prediction = tf.equal( tf.argmax( self.fc_out_lin, 1 ),\n tf.argmax( self.y_trgt, 1 ) )\n self.accuracy = tf.reduce_mean( tf.cast(\n self.is_correct_prediction, tf.float32 ) )\n\n #########################################################\n # Create save operation\n self.saver = tf.train.Saver()", "def architecture(executable=None, bits='', linkage=''): ###\n # Use the sizeof(pointer) as default number of bits if nothing\n # else is given as default.\n if not bits:\n import struct\n try:\n size = struct.calcsize('P')\n except ValueError: ###\n # Older installations can only query longs\n size = struct.calcsize('l')\n bits = str(size*8) + 'bit'\n\n return bits, linkage", "def init_net(self):\r\n # initialize the generator network\r\n g_net = Net(\r\n self.architecture['generator'], net_name='gen',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Gen = Routine(g_net)\r\n self.Gen.add_input_layers([64, self.code_size], [0])\r\n self.Gen.seq_links(list(range(g_net.num_layers)))\r\n self.Gen.add_output_layers([g_net.num_layers - 1])\r\n\r\n # initialize the generator network\r\n d_net = Net(\r\n self.architecture['discriminator'], net_name='dis',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Dis = Routine(d_net)\r\n self.Dis.add_input_layers([64] + list(self.architecture['input'][0]), [0])\r\n self.Dis.seq_links(list(range(d_net.num_layers)))\r\n self.Dis.add_output_layers([d_net.num_layers - 1])", "def build_arch(self):\n return self._build_arch", "def build(self):\n self.build_inputs()\n self.build_word_embeddings()\n self.build_encoder()\n self.build_fc()\n 
self.build_loss()\n self.build_global_step()", "def __init__(self, network_path='.', logging=True,\n input_image_size=None, n_input_channels=None,\n n_output_classes=None,\n fc1_dropout=1.0, alpha=4e-4 ):\n self.logging = logging\n\n # If network path does not yet exists\n self.network_path = network_path\n if not os.path.isdir(self.network_path):\n # Make network directory\n os.mkdir(self.network_path)\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Creation of new network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log(\"\\nNetwork did not exist ... \")\n self.log(\"Created new network with supplied (or default) architecture\")\n\n # Set up new network\n self.y_res = input_image_size[0]\n self.x_res = input_image_size[1]\n self.n_input_channels = n_input_channels\n self.n_output_classes = n_output_classes\n self.fc1_dropout = fc1_dropout\n self.alpha = alpha\n self.n_samples_trained = 0\n self.n_class_samples_trained = self.n_output_classes*[0]\n self.n_samples_list = []\n self.n_class_samples_list = [[] for _ in range(self.n_output_classes)]\n self.accuracy_list = [[] for _ in range(self.n_output_classes)]\n self.precision_list = [[] for _ in range(self.n_output_classes)]\n self.recall_list = [[] for _ in range(self.n_output_classes)]\n self.F1_list = [[] for _ in range(self.n_output_classes)]\n\n # Save network architecture\n self.save_network_architecture( network_path=self.network_path )\n\n else:\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Re-initialization of existing network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \" \")\n\n # Load network architecture from directory\n net_architecture = self.load_network_architecture(self.network_path)\n\n # Set up network variables from loaded architecture\n self.y_res = net_architecture['y_res']\n self.x_res = net_architecture['x_res']\n self.n_input_channels = net_architecture['n_input_channels']\n self.n_output_classes = net_architecture['n_output_classes']\n self.fc1_dropout = net_architecture['fc1_dropout']\n self.alpha = net_architecture['alpha']\n self.n_samples_trained = net_architecture['n_samples_trained']\n self.n_class_samples_trained = net_architecture['n_class_samples_trained']\n self.n_samples_list = net_architecture['n_samples_list']\n self.n_class_samples_list = net_architecture['n_class_samples_list']\n self.accuracy_list = net_architecture['accuracy_list']\n self.precision_list = net_architecture['precision_list']\n self.recall_list = net_architecture['recall_list']\n self.F1_list = net_architecture['F1_list']\n\n # Update values of alpha and dropout if supplied\n if self.alpha != alpha:\n self.alpha = alpha\n self.log(\"Updated learning rate 'alpha' to {}\".format(self.alpha))\n if self.fc1_dropout != fc1_dropout:\n self.fc1_dropout = fc1_dropout\n self.log(\"Updated dropout fraction to {}\".format(self.fc1_dropout))\n\n # Clear previous graphs\n tf.reset_default_graph()\n\n #########################################################\n # Input and target variable placeholders\n # x = [ m_samples x [channel_1_data, channel_2_data, etc.] 
]\n self.x = tf.placeholder( tf.float32, shape = [None,\n self.n_input_channels * self.y_res * self.x_res] )\n self.y_trgt = tf.placeholder( tf.float32, \\\n shape = [None, self.n_output_classes] )\n\n # Set up dropout option for inputs\n self.fc1_keep_prob = tf.placeholder(tf.float32)\n self.x_drop = tf.nn.dropout(self.x, self.fc1_keep_prob)\n\n #########################################################\n # Readout layer\n # Weights and bias\n self.fc_out_shape = \\\n [self.y_res * self.x_res * self.n_input_channels,\n self.n_output_classes]\n self.W_fc_out = tf.Variable( tf.truncated_normal(\n shape=self.fc_out_shape, stddev=0.1 ) )\n self.b_fc_out = tf.Variable( tf.constant(0.1,\n shape=[self.fc_out_shape[1]] ))\n\n # Calculate network step\n self.fc_out_lin = tf.matmul( self.x_drop,\n self.W_fc_out ) + self.b_fc_out\n\n #########################################################\n # Define cost function and optimizer algorithm\n self.cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.fc_out_lin, labels=self.y_trgt ) )\n self.train_step = tf.train.AdamOptimizer(self.alpha).minimize(\n self.cross_entropy )\n\n #########################################################\n # Define how to test trained model\n self.network_prediction = tf.cast( tf.argmax(\n self.fc_out_lin, 1 ), tf.float32 )\n self.is_correct_prediction = tf.equal( tf.argmax( self.fc_out_lin, 1 ),\n tf.argmax( self.y_trgt, 1 ) )\n self.accuracy = tf.reduce_mean( tf.cast(\n self.is_correct_prediction, tf.float32 ) )\n\n #########################################################\n # Create save operation\n self.saver = tf.train.Saver()", "def _create_model(self, arch, num_output_channels, num_input_channels, pretrained, **kwargs):\n\n self.net = None\n\n #--------------------------------------------------------------------------------------------\n # select architecture\n #--------------------------------------------------------------------------------------------\n num_filters = kwargs.get(\"num_filters\", 32)\n kw = {'dim': num_output_channels, 'num_classes': self.num_classes, 'num_channels': num_input_channels, 'pretrained': pretrained,\n 'num_filters': num_filters}\n print(\"kw\", kw)\n self.net = nnmodels.__dict__[arch](**kw)\n\n self.s_arch = arch\n self.num_output_channels = num_output_channels\n self.num_input_channels = num_input_channels\n self.num_filters = num_filters\n\n if self.cuda:\n self.net.cuda()\n if self.parallel and self.cuda:\n self.net = nn.DataParallel(self.net, device_ids= range( torch.cuda.device_count() ))", "def build_graph(self, graph, inst_name, port_nets):\n self.add_graph_edges(graph, port_nets)", "def LeNet5_architecture(self, input_shape):\r\n\r\n # Convolution layer (C1) hyperparameters\r\n s1 = self.hparameters[\"s1\"]\r\n f1 = self.hparameters[\"f1\"]\r\n n1 = self.hparameters[\"n1\"]\r\n\r\n # Average pooling layer(S2) hyperparameters\r\n s2 = self.hparameters[\"s2\"]\r\n f2 = self.hparameters[\"f2\"]\r\n\r\n # Convolutional layer (C3) hyperparameters\r\n s3 = self.hparameters[\"s3\"]\r\n f3 = self.hparameters[\"f3\"]\r\n n3 = self.hparameters[\"n3\"]\r\n\r\n # Average pooling layers (S4) hyperparameters\r\n s4 = self.hparameters[\"s4\"]\r\n f4 = self.hparameters[\"f4\"]\r\n\r\n # Convolutional layer (C5) hyperparameters\r\n s5 = self.hparameters[\"s5\"]\r\n f5 = self.hparameters[\"f5\"]\r\n n5 = self.hparameters[\"n5\"]\r\n\r\n # Number of outputs\r\n num_classes = self.num_classes\r\n\r\n X_input = Input(input_shape)\r\n X = X_input\r\n\r\n # Convolution 
layer 1\r\n X = Conv2D(n1, (f1,f1), strides = (s1, s1), padding = 'valid', name = 'C1', kernel_initializer = glorot_uniform(seed = 0))(X)\r\n # Average pooling\r\n X = AveragePooling2D(pool_size= (f2,f2), strides = (s2,s2), padding = 'valid', name = 'S2')(X)\r\n # Activation\r\n X = Activation('tanh')(X)\r\n # Convolution layer 2\r\n X = Conv2D(n3, (f3,f3), strides = (s3, s3), padding = 'valid', name = 'C3', kernel_initializer = glorot_uniform(seed = 0))(X)\r\n #Average pooling\r\n X = AveragePooling2D(pool_size= (f4,f4), strides = (s4,s4), padding = 'valid', name = 'S4')(X)\r\n # Activation\r\n X = Activation('tanh')(X)\r\n # Convolutional layer 3\r\n X = Conv2D(n5, (f5,f5), strides = (s5, s5), padding = 'valid', name = 'C5', kernel_initializer = glorot_uniform(seed = 0))(X)\r\n # Activation\r\n X = Activation('tanh')(X)\r\n # Flatten\r\n X = Flatten()(X)\r\n # Fully Connected layer\r\n X = Dense(num_classes, activation = 'softmax', name = 'FC', kernel_initializer = glorot_uniform(seed = 0))(X)\r\n\r\n #create model\r\n model = Model(inputs = X_input, outputs = X, name = 'LeNet5')\r\n\r\n return model", "def initialise_network(self):\n raise NotImplementedError", "def register_networks(backend):\n from .network_desc import NetworkDesc\n from .adelaide import AdelaideFastNAS\n from .erdb_esr import ESRN\n from .mobilenet import MobileNetV3Tiny, MobileNetV2Tiny\n from .mobilenetv3 import MobileNetV3Small, MobileNetV3Large\n from .sgas_network import SGASNetwork\n from .necks import FPN\n from .bert import BertClassifier\n from . import resnet\n from . import quant\n from . import mtm_sr\n from . import super_network\n from . import resnet_det\n from . import resnet_general\n from . import resnext_det\n from . import xt_model\n from . import text_cnn\n from . import faster_rcnn\n if backend == \"pytorch\":\n from . import spnet_backbone\n from . import faster_backbone\n from . import pytorch\n elif backend == \"tensorflow\":\n from . import spnet_backbone\n from . import faster_backbone\n from . import tensorflow\n elif backend == \"mindspore\":\n from . 
import mindspore", "def build_net(self, nodes, links, output_network, from_geometry=True, debug=False):\n _nodes = nodes.copy()\n _links = links.copy()\n\n if from_geometry:\n _nodes[['x', 'y']] = _nodes['geometry'].apply(lambda g: pd.Series([g.coords[0][0], g.coords[0][1]]))\n _nodes.drop(['geometry'], axis=1, errors='ignore', inplace=True)\n\n pandasdbf.write_dbf(_nodes, self.environment + r'\\temp_nodes_to_dbf.dbf', pre_process=False)\n pandasdbf.write_dbf(_links, self.environment + r'\\temp_links_to_dbf.dbf', pre_process=False)\n\n script_text = r\"\"\"\n\n RUN PGM=NETWORK PRNFILE=\"%s\\temp_net.prn\"\n FILEO NETO = \"%s\"\n FILEI LINKI[1] = \"%s\"\n FILEI NODEI[1] = \"%s\"\n ENDRUN\n\n \"\"\" % (\n self.environment,\n output_network,\n self.environment + r'\\temp_links_to_dbf.dbf',\n self.environment + r'\\temp_nodes_to_dbf.dbf'\n )\n\n # creating a cube script\n script = open(self.environment + r'\\build_net.s', 'w', encoding='latin')\n script.write(script_text)\n script.close()\n\n # runs the script with voyager.exe\n options = \"\"\"/Start /CloseWhenDone /Minimize /NoSplash\"\"\" if not debug else \"\"\n cmd = 'voyager.exe \"' + self.environment + r'\\build_net.s\" ' + options\n print(cmd)\n os.system(cmd)", "def build_neuron_network(nb_features_map: Union[Sequence[int], None] = None,\n size_linear_layers: Union[Sequence[int], None] = None,\n dropout_rate: Union[Tuple[float, float], float] = 0.3,\n conv_kernel_size: Union[Sequence[int], int] = 3,\n conv_stride: int = 1,\n conv_padding: int = 1,\n conv_activation: str = \"relu\",\n conv_architecture: str = \"CPD\",\n pool_kernel_size: int = 2,\n pool_stride: int = 2,\n dense_activation: str = \"relu\",\n pretrained: Union[str, None] = None,\n grayscale: bool = True,\n optimizer: str = \"Adam\",\n weight_decay: float = 0.,\n learning_rate: float = 0.001,\n ) -> Tuple[nn.Module, List, torch.optim.Optimizer]:\n # Initializations\n if pretrained is not None:\n grayscale = False\n if grayscale:\n channels = 1\n else:\n channels = 3\n if nb_features_map is None:\n nb_features_map = [8]\n if size_linear_layers is None:\n size_linear_layers = []\n height = 224\n width = 224\n module = nn.Module()\n shapes = [(\"input\", channels, height, width)]\n layers = {\"extractor\": [], \"regressor\": []}\n if not hasattr(dropout_rate, \"__len__\"):\n dropout_rate = (dropout_rate, 0.)\n next_dropout_rate = dropout_rate[0]\n # If a pretrained model is used:\n if pretrained is None:\n # Input checks\n if hasattr(conv_kernel_size, \"__len__\"):\n if len(conv_kernel_size) != len(nb_features_map):\n raise ValueError(\"The length of nb_features_map shall match the length of conv_kernel_size\")\n else:\n conv_kernel_size = [conv_kernel_size] * len(nb_features_map)\n # Feature extractor\n next_layer_type = itertools.cycle(conv_architecture)\n nb_feature_map = None\n i = 0\n while True:\n layer_type = next(next_layer_type)\n if layer_type == \"C\":\n # Convolutional layer\n try:\n nb_feature_map = nb_features_map[i]\n except IndexError:\n break\n name = \"conv2d-{:02d}\".format(i+1)\n conv = nn.Conv2d(shapes[-1][1], nb_feature_map, conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n layers[\"extractor\"].append((name, conv))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n shapes.append((name, nb_feature_map, h, w))\n i += 1\n # Activation\n if conv_activation == \"relu\":\n activ = nn.ReLU()\n elif conv_activation == \"elu\":\n activ 
= nn.ELU(alpha=0.1)\n elif conv_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(conv_activation, i)\n layers[\"extractor\"].append((name, activ))\n # activation does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n elif layer_type == \"P\":\n # Max-pooling\n name = \"maxpool2d-{:02d}\".format(i)\n pool = nn.MaxPool2d(pool_kernel_size, pool_stride)\n layers[\"extractor\"].append((name, pool))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=pool_kernel_size, stride=pool_stride)\n shapes.append((name, nb_feature_map, h, w))\n elif layer_type == \"D\":\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"extractor\"].append((name, dropout))\n # Dropout does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n next_dropout_rate += dropout_rate[1]\n elif layer_type == \"B\":\n # Batch normalization\n name = \"batchnorm-{:02d}\".format(i)\n batch = nn.BatchNorm2d(shapes[-1][1])\n layers[\"extractor\"].append((name, batch))\n # Batch norm. does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n # Add a flatten layer\n name = \"flatten\"\n flatten = nn.Flatten(1)\n layers[\"extractor\"].append((name, flatten))\n shapes.append((name, shapes[-1][1] * shapes[-1][2] * shapes[-1][3]))\n # Create extractor module\n extractor = nn.Sequential(OrderedDict(layers[\"extractor\"]))\n module.add_module(\"extractor\", extractor)\n elif pretrained == \"VGG16\":\n pre_trained = models.vgg16(pretrained=True)\n modules = []\n for _name, _module in pre_trained.named_children():\n if _name != 'classifier':\n modules.append((_name, _module))\n modules.append((\"flatten\", nn.Flatten(1)))\n vgg16 = nn.Sequential(OrderedDict(modules))\n # Freeze all parameters in the pre-trained model\n # So we prevent gradients from being calculated, it will save computation time\n for param in vgg16.parameters():\n param.requires_grad = False\n module.add_module('extractor', vgg16)\n shapes.append((pretrained, 25088))\n else:\n raise ValueError(f\"Unknown pre-trained model '{pretrained}'.\")\n # Regressor\n for i, size_linear_layer in enumerate(size_linear_layers):\n # Add a linear layer\n name = \"linear-{:02d}\".format(i + 1)\n linear = nn.Linear(shapes[-1][1], size_linear_layer)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, size_linear_layer))\n # Activation\n if dense_activation == \"relu\":\n activ = nn.ReLU()\n elif dense_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif dense_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(dense_activation, i + 1)\n layers[\"regressor\"].append((name, activ))\n shapes.append((name, shapes[-1][1])) # activation does not change the size\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i + 1)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"regressor\"].append((name, dropout))\n shapes.append((name, shapes[-1][1])) # Dropout does not change the size of array\n next_dropout_rate += dropout_rate[1]\n # Add the final layer, the output size is fixed to 68 x 2 = 136\n name = \"output\"\n linear = nn.Linear(shapes[-1][1], 136)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, 136))\n # Create regressor module\n regressor = 
nn.Sequential(OrderedDict(layers[\"regressor\"]))\n module.add_module(\"regressor\", regressor)\n # Weight initialization\n module.apply(weight_initialization)\n # Optimizer\n if optimizer == \"Adam\":\n optim = torch.optim.Adam(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"AdamW\":\n optim = torch.optim.AdamW(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"SGD\":\n optim = torch.optim.SGD(module.parameters(), lr=learning_rate, weight_decay=weight_decay, momentum=0.9)\n else:\n raise ValueError(f\"Unknown optimizer {optimizer}.\")\n return module, shapes, optim", "def _build_forward_graph(self):\n\n print('[*] Building a Neural Turing Machine.')\n\n self._initalize_state()\n\n # present start token\n controller_out = self.controller.emit_feature_vector(self.start_token, self.r_t[0], reuse=None)\n self._read_write(controller_out, reuse=None)\n\n # present inputs\n print('Input chain: ')\n for t in range(0, self.sequence_length):\n print_progress(float(t + 1) / self.sequence_length)\n\n controller_out = self.controller.emit_feature_vector(self.inputs[t], self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n # present end token\n controller_out = self.controller.emit_feature_vector(self.end_token, self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n # present outputs\n print('Output chain: ')\n for t in range(0, self.sequence_length):\n print_progress(float(t + 1) / self.sequence_length)\n\n controller_out = self.controller.emit_feature_vector(self.zeros, self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n reuse = None if t == 0 else True\n self.outputs.append(self._decode_read_vector(self.r_t[-1], reuse=reuse))\n print('Done.')", "def build(data_shape_1, data_shape_2):\n # create NN model \n # design network\n \n inputs = keras.Input(shape=(data_shape_1, data_shape_2), name='inp')\n cnn1 = layers.Conv1D(16, 5, activation='relu')(inputs)\n cnn2 = layers.Conv1D(32, 3, activation='relu')(cnn1)\n cnn3 = layers.Conv1D(64, 3, activation='relu')(cnn2)\n cnn3 = layers.Flatten()(cnn3)\n lstm = layers.LSTM(100,return_sequences = True, activation='relu')(inputs)\n lstm = layers.Flatten()(lstm)\n x = layers.concatenate([cnn3,lstm])\n x = layers.Dense(100, activation='sigmoid')(x)\n outputs = layers.Dense(24)(x)\n\n model = keras.Model(inputs=inputs, outputs=outputs, name='mnist_model')\n \n return model", "def build_inference_graph(self):\n self.build_train_graph()", "def create_network(outfname_train, outfname_deploy, N_conv_layers=3, N_fully_connected_layers=3, batch_size_train=100,batch_size_test=100, source_train='datatrain', source_test='datatest', num_output_conv=32, kernel_size=3, weight_std_conv=0.01, activation='relu', num_output_fully_connected=64, weight_std_fully_connected=0.01, do_batchnorm=1, do_last_batchnorm=1, scale=1,shift=0, weight_std_affine=0, use_softmax=0, num_classes=3, input_dim_1=1,input_dim_2=3, input_dim_3=32, input_dim_4=32, use_lowrank=1, T_dimension=None, softmax_weight=1, lowrank_weight=1, data_type='lmdb'):\n\n if T_dimension==None:\n T_dimension = num_classes\n \n train_txt = \"\"\n deploy_txt = \"\"\n\n train_txt += data_layer(name='data_layer', source_train=source_train, batch_size_train=batch_size_train, source_test=source_test, batch_size_test=batch_size_test, data_type=data_type)\n\n deploy_txt += deploy_data_layer(name='data_layer', input_dim_1=input_dim_1, input_dim_2=input_dim_2, input_dim_3=input_dim_3, 
input_dim_4=input_dim_4)\n\n last_name = 'data'\n\n ####### CONVOLUTIONAL LAYERS\n for i in range(N_conv_layers):\n conv_name = 'conv%i' % (i+1)\n top = conv_name\n\n conv_txt = convolution_layer(conv_name, last_name, num_output=num_output_conv, kernel_size=kernel_size, weight_std=weight_std_conv)\n\n train_txt += conv_txt\n deploy_txt += conv_txt\n \n if activation == 'pool':\n pool_name = 'pool%i' % (i+1)\n activation_txt = pooling_layer(pool_name, conv_name)\n last_name = pool_name\n elif activation == 'relu':\n relu_name = 'relu%i' % (i+1)\n activation_txt = relu_layer(relu_name, conv_name)\n last_name = conv_name\n else:\n raise Exception('Unknown activation')\n \n\n train_txt += activation_txt\n deploy_txt += activation_txt\n\n \n\n ####### FULLY CONNECTED LAYERS\n for i in range(N_fully_connected_layers):\n fully_connected_name = 'ip%i' % (i+1)\n\n fully_connected_txt = fully_connected_layer(fully_connected_name, last_name, num_output=num_output_fully_connected, weight_std=weight_std_fully_connected)\n\n relu_name = 'iprelu%i' % (i+1)\n relu_txt = relu_layer(relu_name, fully_connected_name)\n\n batchnorm_name = 'ipbn%i' % (i+1)\n\n if do_batchnorm and i<N_fully_connected_layers-1:\n batchnorm_txt_train = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=False, phase='TRAIN', deploy=False)\n batchnorm_txt_test = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=True, phase='TEST', deploy=False)\n \n batchnorm_txt_deploy = batchnorm_layer(batchnorm_name, fully_connected_name, deploy=True)\n scale_txt = ''\n \n last_name = batchnorm_name\n \n elif do_last_batchnorm:\n batchnorm_txt_train = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=False, phase='TRAIN', deploy=False)\n batchnorm_txt_test = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=True, phase='TEST', deploy=False)\n \n batchnorm_txt_deploy = batchnorm_layer(batchnorm_name, fully_connected_name, deploy=True)\n scale_name = 'ipbnscaled%i' % (i+1)\n\n scale_txt = scale_layer(scale_name, batchnorm_name, scale=scale,shift=shift)\n \n last_name = scale_name\n else:\n batchnorm_txt_train = ''\n batchnorm_txt_test = ''\n batchnorm_txt_deploy = ''\n last_name = fully_connected_name\n scale_txt = ''\n \n train_txt += fully_connected_txt + relu_txt + batchnorm_txt_train + batchnorm_txt_test + scale_txt\n deploy_txt += fully_connected_txt + relu_txt + batchnorm_txt_deploy + scale_txt\n \n\n\n\n\n # add affine layer on top of funnel layer \n affine_name = 'affine' # (matrix T)\n affine_txt = fully_connected_layer(affine_name, last_name, num_output=T_dimension, weight_std=weight_std_affine)\n\n train_txt += affine_txt\n deploy_txt += affine_txt\n \n # apply lowrank loss to output of 'affine' layer [conv - fully_connected -\n # funnel - affine - lowrank] the lowrank output is located in affine. The\n # 'funnel' layer is used to allow softmax to separate between classes before\n # LRT\n if use_lowrank:\n lowrank_txt = lowrank_layer('lowrank_loss', affine_name, loss_weight=lowrank_weight)\n train_txt += lowrank_txt\n\n if use_softmax:\n # apply softmax loss to output of funnel layer [conv - fully_connected - funnel - softmax]\n # add one affine layer to reduce from num_output_fully_connected to num_classes\n\n # apr 4. 
trying on top of fully connected layer\n funnel_name = 'funnel'\n funnel_txt = fully_connected_layer(funnel_name, last_name, num_output=num_classes, weight_std=weight_std_fully_connected)\n\n train_txt += funnel_txt\n deploy_txt += funnel_txt\n\n softmax_txt = softmax_layer('softmax_loss', funnel_name, loss_weight=softmax_weight)\n train_txt += softmax_txt\n\n write_to_file(outfname_train, train_txt)\n write_to_file(outfname_deploy, deploy_txt)\n\n \n return train_txt, deploy_txt", "def build_modules(self):\n self.backbone = Backbone(\n self.configs['backbone'],\n freeze_backbone=self.configs['freeze_backbone'],\n freeze_batchnorm=True\n )\n\n backbone_channel_sizes = get_backbone_channel_sizes(self.backbone)\n\n self.fpn = FeaturePyramidNetwork(\n backbone_channel_sizes=backbone_channel_sizes,\n min_feature_level=self.configs['min_feature_level'],\n max_feature_level=self.configs['max_feature_level'],\n feature_size=self.configs['pyramid_feature_size']\n )\n\n self.shared_conv_model = SharedConvModel(\n input_feature_size=self.configs['pyramid_feature_size'],\n feature_size=self.configs['shared_conv_feature_size'],\n num_layers=self.configs['shared_conv_num_layers']\n )\n\n if self.configs['shared_conv_num_layers'] > 0:\n shared_conv_output_size = self.configs['shared_conv_feature_size']\n else:\n shared_conv_output_size = self.configs['pyramid_feature_size']\n\n self.ofn = ObjectFinderNetwork(\n input_feature_size=shared_conv_output_size,\n feature_size=self.configs['finder_feature_size'],\n num_layers=self.configs['finder_num_layers']\n )\n\n self.ofn_loss_fn\n\n # self.classification_model = ClassificationModel()\n #\n # self.regression_model = RegressionModel()", "def generate_networks(self):\n\n # Defines dictionary of residue interaction types to include as network\n # edges.\n #**N.B.** Might want to provide these interactions as a program input?\n # **N.B.** 'intra' in the interaction names dict refers to interactions\n # between residues in the same chain\n interactions = [['hb', 'hb_pairs', 'hb_pairs_fasta_intra'],\n ['nhb', 'nhb_pairs', 'nhb_pairs_fasta_intra'],\n ['plusminus2', 'minus_2', 'minus_2_fasta'],\n ['plusminus2', 'plus_2', 'plus_2_fasta'],\n ['plusminus1', 'minus_1', 'minus_1_fasta'],\n ['plusminus1', 'plus_1', 'plus_1_fasta'],\n ['vdw', 'van_der_waals', 'van_der_waals_fasta_intra']]\n\n # Initialises MultiGraph (= undirected graph with self loops and\n # parallel edges) network of interacting residues\n G = nx.MultiGraph()\n\n # Adds nodes (= residues) to MultiGraph, labelled with their side-chain\n # identity (initially set to unknown), z-coordinate, buried surface area\n # (sandwiches only) and whether they are edge or central strands\n # (sandwiches only).\n if self.barrel_or_sandwich == '2.40':\n for num in range(self.input_df.shape[0]):\n node = self.input_df['domain_ids'][num] + self.input_df['res_ids'][num]\n aa_id = self.input_df['fasta_seq'][num]\n int_or_ext = self.input_df['int_ext'][num][0:3]\n z_coord = self.input_df['z_coords'][num]\n try:\n phi_psi_class = self.input_df['phi_psi_class'][num]\n except KeyError:\n phi_psi_class = '-'\n if not int_or_ext in ['int', 'ext']:\n raise ValueError('Residue {} has not been assigned to the '\n 'interior or exterior surface of the input'\n ' beta-barrel structure'.format(node))\n G.add_node(node, type='strand', aa_id=aa_id, int_ext=int_or_ext,\n eoc='-', z=z_coord, phipsi=phi_psi_class)\n elif self.barrel_or_sandwich == '2.60':\n for num in range(self.input_df.shape[0]):\n node = self.input_df['domain_ids'][num] 
+ self.input_df['res_ids'][num]\n aa_id = self.input_df['fasta_seq'][num]\n int_or_ext = self.input_df['int_ext'][num][0:3]\n z_sandwich_coord = self.input_df['sandwich_z_coords'][num]\n #z_strand_coord = self.input_df['strand_z_coords'][num]\n #buried_surface_area = self.input_df['buried_surface_area'][num]\n edge_or_central = self.input_df['edge_or_central'][num][0:3]\n try:\n phi_psi_class = self.input_df['phi_psi_class'][num]\n except KeyError:\n phi_psi_class = '-'\n if not int_or_ext in ['int', 'ext']:\n raise ValueError('Residue {} has not been assigned to the '\n 'interior or exterior surface of the input'\n ' beta-barrel structure'.format(node))\n G.add_node(node, type='strand', aa_id=aa_id, int_ext=int_or_ext,\n z=z_sandwich_coord,\n #zstrand=z_strand_coord, bsa=buried_surface_area,\n eoc=edge_or_central,\n phipsi=phi_psi_class)\n\n domain_res_ids = list(G.nodes())\n\n # Adds edges (= residue interactions) to MultiGraph, labelled by\n # interaction type. The interactions considered are defined in\n # interactions_dict.\n for int_list in interactions:\n edge_label = int_list[0]\n int_name = int_list[1]\n int_fasta = int_list[2]\n\n for num in range(self.input_df.shape[0]):\n res_1 = self.input_df['domain_ids'][num] + self.input_df['res_ids'][num]\n res_list = self.input_df[int_name][num]\n if type(res_list) != list:\n res_list = [res_list]\n\n for res_index, res_2 in enumerate(res_list):\n res_2 = self.input_df['domain_ids'][num] + res_2\n # Accounts for interactions between residue pairs where one\n # residue is in the beta-barrel/sandwich domain and the\n # other is within a loop region\n aa_id = self.input_df[int_fasta][num][res_index]\n if not res_2 in list(G.nodes()):\n G.add_node(res_2, type='loop', aa_id=aa_id)\n if aa_id != G.nodes()[res_2]['aa_id']:\n print(aa_id, G.nodes()[res_2]['aa_id'])\n raise ValueError(\n 'Identity of node {} is inconsistent according to '\n 'the pairwise interactions listed in {} '\n '{}'.format(res_2, self.input_df_path, edge_label)\n )\n\n # Ensures interactions are only added to the network once\n if G.has_edge(res_1, res_2) is False:\n G.add_edge(res_1, res_2, interaction=edge_label)\n elif G.has_edge(res_1, res_2) is True:\n attributes = [val for label, sub_dict in\n dict(G[res_1][res_2]).items() for key,\n val in sub_dict.items()]\n if not edge_label in attributes:\n G.add_edge(res_1, res_2, interaction=edge_label)\n\n return G", "def setup_net(self):\n pass", "def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, self.optim_classifier, self.sched_classifier)", "def build_graph(self):\n raise NotImplementedError", "def architecture(self) -> str:\n return pulumi.get(self, \"architecture\")", "def architecture(self) -> str:\n return pulumi.get(self, \"architecture\")", "def 
load_network_architecture(self,network_path):\n net_architecture = np.load(\n os.path.join(network_path,'net_architecture.npy')).item()\n self.log(\"Network architecture loaded from file:\\n{}\".format(\n os.path.join(network_path,'net_architecture.npy')))\n return net_architecture", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n\n # Compute the Q-values which are used for action selection in the current\n # state.\n self._net_outputs = self.online_convnet(self.state_ph,\n self.num_quantile_samples)\n # Shape of self._net_outputs.quantile_values:\n # num_quantile_samples x num_actions.\n # e.g. if num_actions is 2, it might look something like this:\n # Vals for Quantile .2 Vals for Quantile .4 Vals for Quantile .6\n # [[0.1, 0.5], [0.15, -0.3], [0.15, -0.2]]\n # Q-values = [(0.1 + 0.15 + 0.15)/3, (0.5 + 0.15 + -0.2)/3].\n self._q_values = tf.reduce_mean(self._net_outputs.quantile_values, axis=0)\n self._q_argmax = tf.argmax(self._q_values, axis=0)\n self._policy_logits = tf.nn.softmax(self._q_values / self.tau, axis=0)\n self._stochastic_action = tf.random.categorical(\n self._policy_logits[None, Ellipsis],\n num_samples=1,\n dtype=tf.int32)[0][0]\n\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n self.num_tau_samples)\n # Shape: (num_tau_samples x batch_size) x num_actions.\n self._replay_net_quantile_values = self._replay_net_outputs.quantile_values\n self._replay_net_quantiles = self._replay_net_outputs.quantiles\n\n # Do the same for next states in the replay buffer.\n self._replay_net_target_outputs = self.target_convnet(\n self._replay.next_states, self.num_tau_prime_samples)\n # Shape: (num_tau_prime_samples x batch_size) x num_actions.\n vals = self._replay_net_target_outputs.quantile_values\n self._replay_net_target_quantile_values = vals\n\n # Compute Q-values which are used for action selection for the states and\n # next states in the replay buffer.\n target_next_action = self.target_convnet(self._replay.next_states,\n self.num_quantile_samples)\n target_action = self.target_convnet(self._replay.states,\n self.num_quantile_samples)\n\n # Shape: (num_quantile_samples x batch_size) x num_actions.\n target_next_quantile_values_action = target_next_action.quantile_values\n # Shape: num_quantile_samples x batch_size x num_actions.\n target_next_quantile_values_action = tf.reshape(\n target_next_quantile_values_action,\n [self.num_quantile_samples, self._replay.batch_size, self.num_actions])\n\n # Shape: (num_quantile_samples x batch_size) x num_actions.\n target_quantile_values_action = target_action.quantile_values\n # Shape: num_quantile_samples x batch_size x num_actions.\n target_quantile_values_action = tf.reshape(target_quantile_values_action,\n [self.num_quantile_samples,\n self._replay.batch_size,\n self.num_actions])\n # Shape: batch_size x num_actions.\n self._replay_next_target_q_values = tf.squeeze(tf.reduce_mean(\n target_next_quantile_values_action, axis=0))\n self._replay_target_q_values = tf.squeeze(tf.reduce_mean(\n target_quantile_values_action, axis=0))\n\n self._replay_next_qt_argmax = tf.argmax(\n self._replay_next_target_q_values, axis=1)", "def build(self, config):\n nets = OrderedDict()\n\n nets['shared'] = NeuralNet(self.tensor_in, config['net_g']['shared'],\n name='shared')\n\n nets['pitch_time_private'] = [\n NeuralNet(nets['shared'].tensor_out,\n config['net_g']['pitch_time_private'],\n name='pt_'+str(idx))\n for idx in 
range(config['num_track'])\n ]\n\n nets['time_pitch_private'] = [\n NeuralNet(nets['shared'].tensor_out,\n config['net_g']['time_pitch_private'],\n name='tp_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['merged_private'] = [\n NeuralNet(tf.concat([nets['pitch_time_private'][idx].tensor_out,\n nets['time_pitch_private'][idx].tensor_out],\n -1),\n config['net_g']['merged_private'],\n name='merged_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['refiner_private'] = [\n NeuralNet(nets['merged_private'][idx].tensor_out,\n config['net_r']['private'],\n slope_tensor=self.slope_tensor,\n name='refiner_private'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n return (tf.concat([nn.tensor_out for nn in nets['private']], -1), nets,\n tf.concat([nn.layers[-1].preactivated\n for nn in nets['private']], -1))", "def build_model(self):\n # Input layers\n\n states = layers.Input(shape=(self.state_size, ), name='states')\n actions = layers.Input(shape=(self.action_size, ), name='actions')\n\n kernel_initializer = initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal', seed=None)\n\n # Add hidden layer(s) for state pathway\n net_states = layers.Dense(units=400, activation='elu', kernel_initializer=kernel_initializer)(states)\n\n # Add hidden layer(s) for action pathway\n net_actions = layers.Dense(units=400, activation='elu', kernel_initializer=kernel_initializer)(actions)\n \n # Combine state and action pathways. The two layers can first be processed via separate \n # \"pathways\" (mini sub-networks), but eventually need to be combined.\n net = layers.Add()([net_states, net_actions])\n \n net = layers.Dense(units=300, activation='elu', kernel_initializer=kernel_initializer)(net)\n \n Q_values = layers.Dense(units=1, activation=None, name='q_values',kernel_initializer=kernel_initializer)(net)\n\n # Create keras model\n self.model = models.Model(inputs=[states, actions], outputs=Q_values)\n\n\n ## Add l2 weight decay here\n optimizer = optimizers.Adam()\n self.model.compile(optimizer=optimizer, loss='mse')\n\n # Compute action gradients\n action_gradients = K.gradients(Q_values, actions)\n\n self.get_action_gradients = K.function(\n inputs=[*self.model.input, K.learning_phase()],\n outputs=action_gradients)", "def build_model(self):\n states = layers.Input(shape=(self.state_size,), name='states')\n actions = layers.Input(shape=(self.action_size,), name='actions')\n\n # Hidden Layers for state pathway\n net_states = layers.Dense(units=320, kernel_regularizer=regularizers.l2(0.01), activation='relu')(states)\n net_states = layers.BatchNormalization()(net_states)\n net_states = layers.Dropout(0.25)(net_states)\n net_states = layers.Dense(units=160, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_states)\n net_states = layers.BatchNormalization()(net_states)\n net_states = layers.Dropout(0.25)(net_states)\n net_states = layers.Dense(units=80, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_states)\n net_states = layers.BatchNormalization()(net_states)\n net_states = layers.Dropout(0.25)(net_states)\n net_states = layers.Dense(units=40, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_states)\n net_states = layers.BatchNormalization()(net_states)\n net_states = layers.Dropout(0.25)(net_states)\n\n # Hidden Layer for action pathway\n net_actions = layers.Dense(units=320, kernel_regularizer=regularizers.l2(0.01), activation='relu')(actions)\n net_actions = layers.BatchNormalization()(net_actions)\n 
net_actions = layers.Dropout(0.25)(net_actions)\n net_actions = layers.Dense(units=160, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_actions)\n net_actions = layers.BatchNormalization()(net_actions)\n net_actions = layers.Dropout(0.25)(net_actions)\n net_actions = layers.Dense(units=80, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_actions)\n net_actions = layers.BatchNormalization()(net_actions)\n net_actions = layers.Dropout(0.25)(net_actions)\n net_actions = layers.Dense(units=40, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_actions)\n net_actions = layers.BatchNormalization()(net_actions)\n net_actions = layers.Dropout(0.25)(net_actions)\n\n # Combine state and action pathways\n net = layers.Add()([net_states, net_actions])\n net = layers.Activation('relu')(net)\n\n # Final Output layer\n Q_values = layers.Dense(units=1, name='q_values')(net)\n\n # Create a Keras Model\n self.model = models.Model(inputs=[states, actions], outputs=Q_values)\n\n # Define Optimizer and Compile the Model\n optimizer = optimizers.Adam(lr=0.0001)\n self.model.compile(optimizer=optimizer, loss='mse')\n\n # Action Gradients (derivative of Q_Value\n action_gradient = K.gradients(Q_values, actions)\n\n # Function to fetch action gradients\n self.get_action_gradients = K.function(\n inputs=[*self.model.input, K.learning_phase()],\n outputs=action_gradient\n )", "def build(self):\n self.build_inputs()\n self.image_embeddings = self.build_image_embeddings(self.images)\n self.seq_embeddings = self.build_seq_embeddings(self.input_seqs)\n self.build_model()\n self.setup_inception_initializer()\n self.setup_global_step()", "def make_fully_connected_network(input_layer, architecture, activation=tf.nn.relu, network_name=''):\n if not architecture: raise AssertionError('no hidden layers in the architecture')\n L = [input_layer]\n W = []\n B = []\n for l, layer_size in enumerate(architecture):\n a, w, b = make_fully_connected_layer(\n L[-1],\n layer_size,\n activation=activation,\n layer_name=network_name + '_layer_' + str(l + 1)\n )\n L.append(a)\n W.append(w)\n B.append(b)\n return L, W, B", "def _build(self):\n with tf.variable_scope (self.name + '_architecutre') as scope:\n images_square = unflatten_layer ( self.images )\n visualize_images(images_square)\n\n # Conv Layer 1\n conv1_out, params = conv_2d_layer ( input = images_square,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n name = 'enc_conv_1',\n visualize = True )\n process_params(params, name = self.name)\n e1_params = params\n pool1_out = max_pool_2d_layer ( input = conv1_out, name = 'enc_pool_1')\n # lrn1_out = local_response_normalization_layer (pool1_out, name = 'lrn_1' )\n\n # Conv Layer 2\n conv2_out, params = conv_2d_layer ( input = pool1_out,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n name = 'enc_conv_2' )\n process_params(params, name = self.name)\n e2_params = params\n pool2_out = max_pool_2d_layer ( input = conv2_out, name = 'enc_pool_2')\n # lrn2_out = local_response_normalization_layer (pool2_out, name = 'lrn_2' )\n\n flattened = flatten_layer(pool2_out)\n\n # Dropout Layer 1 \n flattened_dropout = dropout_layer ( input = flattened,\n prob = self.dropout_prob,\n name = 'enc_dropout_1') \n\n # Dot Product Layer 1\n fc1_out, params = dot_product_layer ( input = flattened_dropout,\n neurons = HIDDEN_1,\n name = 'enc_dot_1')\n process_params(params, name = self.name)\n e3_params = params \n\n # Dropout Layer 2 \n fc1_out_dropout = dropout_layer ( input = fc1_out,\n prob = 
self.dropout_prob,\n name = 'enc_dropout_2')\n # Dot Product Layer 2\n fc2_out, params = dot_product_layer ( input = fc1_out_dropout, \n neurons = HIDDEN_2,\n name = 'enc_dot_2')\n process_params(params, name = self.name)\n e4_params = params \n\n # Dropout Layer 3 \n fc2_out_dropout = dropout_layer ( input = fc2_out,\n prob = self.dropout_prob,\n name = 'enc_dropout_3')\n \n # Dot Product Layer 2\n self.codeword, params = dot_product_layer ( input = fc2_out_dropout, \n neurons = CODEWORD_LENGTH,\n activation = CODE_ACTIVATION,\n name = 'enc_dot_2')\n process_params(params, name = self.name)\n process_codeword_normalization_regularizer(self.codeword, \n coeff = AUTOENCODER_CODEWORD_COEFF,\n name = self.name)\n e5_params = params \n # tf.summary.histogram('codewords', self.codeword)\n # self.hash = threshold_layer ( input = self.codeword,\n # name = 'hash')\n # process_hash_regularizer(self.codeword, coeff = AUTOENCODER_HASH_COEFF,\n # name = self.name)\n\n # Decoder ... \n decoder_1_out, params = dot_product_layer ( input = self.codeword, \n neurons = HIDDEN_2,\n params = [tf.transpose(e5_params[0]), None],\n name = 'decoder_dot_1')\n d1_params = params\n process_params([params[1]], name = self.name)\n \n dec_1_out_dropout = dropout_layer ( input = decoder_1_out,\n prob = self.dropout_prob,\n name = 'dec_dropout_1')\n\n decoder_2_out, params = dot_product_layer ( input = dec_1_out_dropout, \n neurons = HIDDEN_1,\n params = [tf.transpose(e4_params[0]), None],\n name = 'decoder_dot_2')\n d2_params = params\n process_params([params[1]], name = self.name)\n \n # dropout 2\n dec_2_out_dropout = dropout_layer ( input = decoder_2_out,\n prob = self.dropout_prob,\n name = 'dec_dropout_2')\n\n decoder_3_out, params = dot_product_layer ( input = dec_2_out_dropout, \n neurons = 1250,\n params = [tf.transpose(e3_params[0]), None],\n name = 'decoder_dot_3')\n d3_params = params\n process_params([params[1]], name = self.name)\n\n # DeConv Layer 1\n # The output shapes need to be changed according to architecture.\n\n dec_3_square = unflatten_layer ( decoder_3_out, channels = CONV_2_N )\n upsample_1 = upsampling_layer (dec_3_square, size = (10,10), name = 'dec_upsampling_1')\n\n deconv1_out, params = deconv_2d_layer ( input = upsample_1,\n neurons = CONV_1_N,\n filter_size = CONV_2_FILT,\n output_shape = (12,12),\n # n_outs = MINI_BATCH_SIZE,\n stride = (1,1,1,1), \n params = [e2_params[0], None], \n name = 'dec_deconv_1' )\n\n process_params([params[1]], name = self.name)\n d4_params = params\n\n # DeConv Layer 2\n upsample_2 = upsampling_layer (deconv1_out, size = (24,24), name = 'dec_upsampling_2')\n decoded_images_square, params = deconv_2d_layer ( input = upsample_2,\n neurons = 1,\n filter_size = CONV_1_FILT,\n stride = (1,1,1,1),\n output_shape = (28,28),\n # n_outs = MINI_BATCH_SIZE, \n params = [e1_params[0], None], \n activation = 'tanh', \n name = 'dec_deconv_2' )\n \n process_params([params[1]], name = self.name)\n d5_params = params \n \n self.decoded = flatten_layer (decoded_images_square, in_shp = [-1, 28, 28, 1])\n visualize_images(decoded_images_square, name = 'decoded')\n # This is because transpose don't initialize.\n self.params = [ [e5_params[0], d1_params[1] ],\n [e4_params[0], d2_params[1] ],\n [e3_params[0], d3_params[1] ],\n [e2_params[0], d4_params[1] ],\n [e1_params[0], d5_params[1] ] ]\n\n with tf.variable_scope (self.name + '_objectives') as scope: \n with tf.variable_scope( self.name + '_decoder_error') as scope:\n reconstruction_error = rmse(self.images, self.decoded) \n 
tf.add_to_collection( self.name + '_objectives', reconstruction_error ) \n tf.summary.scalar('reconstruction_error', reconstruction_error)\n\n self._cook_optimizer( \n lr = AUTOENCODER_LR, \n optimizer = AUTOENCODER_OPTIMIZER,\n l1_coeff = AUTOENCODER_L1_COEFF,\n l2_coeff = AUTOENCODER_WEIGHT_DECAY_COEFF)", "def compile_network(model, optimizer):\n compile_network_model(model, optimizer, categorical_crossentropy)", "def _build_graph(self):\n pass", "def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]", "def build(self):\n input_shape_img = (None, None, 3)\n img_input = Input(shape=input_shape_img)\n roi_input = Input(shape=(None, 4))\n shared_layers = self.cnn_model.nn_base(img_input, trainable=True)\n num_anchors = len(self.C.anchor_scales) * len(self.C.anchor_ratios)\n \n output_region_proposal = self.region_proposal_net(shared_layers, num_anchors)\n output_classifier = self.classifier(shared_layers,\n self.cnn_model.classifier_layers, \n roi_input, self.C.num_roi, \n num_class=len(self.class_count), trainable=True)\n \n self.model_region_proposal = Model(img_input, output_region_proposal[:2])\n self.model_classifier = Model([img_input, roi_input], output_classifier)\n self.model_all = Model([img_input, roi_input], output_region_proposal[:2] + output_classifier)\n\n optimizer = Adam(lr=1e-5)\n self.model_region_proposal.compile(optimizer=optimizer, \n loss=[losses.rpn_loss_cls(num_anchors), \n losses.rpn_loss_regr(num_anchors)])\n self.model_classifier.compile(optimizer=optimizer, \n loss=[losses.class_loss_cls, \n losses.class_loss_regr(len(self.class_count)-1)], \n metrics={'dense_class_{}'.format(len(self.class_count)): 'accuracy'})\n self.model_all.compile(optimizer='sgd', loss='mae')\n\n # print(self.model_all.summary())\n plot_model(self.model_region_proposal, show_shapes=True, to_file='./frcnn/images/region_proposal.png')\n plot_model(self.model_classifier, show_shapes=True, to_file='./frcnn/images/classifier.png')\n plot_model(self.model_all, show_shapes=True, to_file='./frcnn/images/model_all.png')", "def __call__(self, inputs, training):\n\n\t\treturn self._build_network(inputs, training)", "def build_graph(self):\n pass", "def build_2net(input_size, output_size, n_hidden=[5, 3]):\n\t# Create network and modules\n\tnet = FeedForwardNetwork()\n\tinp = LinearLayer(input_size)\n\th1 = SigmoidLayer(n_hidden[0])\n\th2 = TanhLayer(n_hidden[1])\n\toutp = LinearLayer(output_size)\n\t# Add modules\n\tnet.addOutputModule(outp)\n\tnet.addInputModule(inp)\n\tnet.addModule(h1)\n\tnet.addModule(h2)\n\t# Create connections\n\tnet.addConnection(FullConnection(inp, h1, inSliceTo=6))\n\tnet.addConnection(FullConnection(inp, h2, inSliceFrom=6))\n\tnet.addConnection(FullConnection(h1, h2))\n\tnet.addConnection(FullConnection(h2, outp))\n\t# Finish up\n\tnet.sortModules()\n\treturn net", "def architecture(self):\n return self._architecture", "def compile(self):\n # create both networks\n self.q_network = self.create_model()\n # self.target_q_network = self.create_model()\n\n # set loss function in both \n adam = 
Adam(lr=1e-4)\n self.q_network.compile(loss=mean_huber_loss, optimizer=adam) \n # self.target_q_network.compile(loss=mean_huber_loss, optimizer=adam)\n \n # set the same weights for both initially\n # self.target_q_network.set_weights(self.q_network.get_weights())\n \n print self.q_network.summary()", "def _build(self, input_var, name=None):\n out = input_var\n for model in self._models:\n self._last_network = model.build(out, name=name)\n if self._first_network is None:\n self._first_network = self._last_network\n out = self._last_network.outputs\n\n return out", "def build_discriminator(self):\n img_shape = (self.img_size[0], self.img_size[1], self.channels)\n\n model = Sequential()\n ###############\n # Conv Stack 1:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, input_shape=img_shape, padding=\"same\")\n ) # 128x128 -> 64x64\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.2))\n\n ###############\n # Conv Stack 2:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, padding=\"same\")\n ) # 64x64 -> 32x32\n # model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 3:\n ###############\n model.add(\n Conv2D(128, kernel_size=4, strides=2, padding=\"same\")\n ) # 32x32 -> 16x16\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 4:\n ###############\n model.add(Conv2D(128, kernel_size=4, strides=1, padding=\"same\")) # 16x16 -> 8x8\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 5:\n ###############\n model.add(Conv2D(128, kernel_size=3, strides=1, padding=\"same\")) # 8x8 -> 4x4\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Dropout(0.4))\n\n model.add(Flatten())\n model.add(Dense(1, activation=\"sigmoid\")) # important binary classification.\n\n model.summary()\n\n # Model require Pair.\n img = Input(shape=img_shape)\n validity = model(img)\n\n return Model(img, validity)", "def build_model_mobilenet(num_classes):", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b", "def assemble(self):\n machineCodeLength = len(self.instructionList)\n # Adds all of the data lengths to the length\n for symbol in self.symbolTable:\n if symbol[\"type\"] == \"DATA\":\n machineCodeLength += symbol[\"length\"]\n # Stores the machine code instructions\n machineCode = [0 for i in range(machineCodeLength)]\n # Adds all DATA symbols to the machineCode\n dataOffset = len(self.instructionList) # Stores the offset into the machine code for the current data symbol\n for symbol in self.symbolTable:\n if symbol[\"type\"] == \"DATA\":\n # Stores the operand into the memory\n\n # Stores the memory location of the data\n symbol[\"pointer\"] = dataOffset\n dataOffset += symbol[\"length\"]\n\n # Assembles every instruction\n for i in range(len(self.instructionList)):\n ins = self.instructionList[i]\n # Constructs the machine code instruction\n machineCode[i] |= (ins['controlBits'] & 0x3F) << 26\n machineCode[i] |= (ins['code'] & 0xFF) << 18\n # Looks through all of the awaiting in the operand and fills in the output for 
each\n for sym in ins['operand']['awaiting']:\n symType = \"DATA\" if \"DATA\" in sym else \"LABEL\"\n symbolName = sym[symType]['symbol']\n destination = sym[symType]['output']\n # Searches through the symbol table for the symbol\n for symbol in self.symbolTable:\n # Checks if it is a valid symbol\n if symbol[\"type\"] == symType and symbol[\"name\"] == symbolName:\n if symbol[\"type\"] == \"LABEL\":\n ins[\"operand\"][destination] = symbol[\"pointer\"]\n elif symbol[\"type\"] == \"DATA\":\n ins[\"operand\"][destination] = symbol[\"pointer\"]\n ins['operand']['awaiting'] = []\n print(ins)\n # Gets the main operand value\n if ins['operand']:\n if 'operand' in ins['operand']:\n if ins['operand']['operandType'] == 'int':\n machineCode[i] |= (1 << 18) # Sets value mode for the operand\n value = ins['operand']['operand'].to_bytes(4, \"big\")\n machineCode[i] |= value[0] << 12\n machineCode[i] |= value[1] << 8\n machineCode[i] |= value[2] << 4\n machineCode[i] |= value[3]\n elif ins['operand']['operandType'] == 'float':\n machineCode[i] |= (1 << 18) # Sets value mode for the operand\n value = struct.pack('>f', ins['operand']['operand'])\n machineCode[i] |= value[0] << 12\n machineCode[i] |= value[1] << 8\n machineCode[i] |= value[2] << 4\n machineCode[i] |= value[3]\n elif ins['operand']['operandType'] == 'register':\n machineCode[i] |= (ins['operand']['operand'] & 0xF) << 4\n if 'Rin' in ins['operand']: \n # Clears the bits at the location\n machineCode[i] &= 0xFFFFF0FF\n machineCode[i] |= (ins['operand']['Rin'] & 0xF) << 8\n elif 'address' in ins['operand']:\n if ins['operand']['addressingMode'] == \"Absolute\" or ins['operand']['addressingMode'] == \"Indirect\":\n addr = ins['operand']['address'].to_bytes(4, \"big\")\n machineCode[i] |= addr[0] << 12\n machineCode[i] |= addr[1] << 8\n machineCode[i] |= addr[2] << 4\n machineCode[i] |= addr[3]\n if ins['operand']['addressingMode'] == \"Absolute\": machineCode[i] |= 0x0 << 16\n elif ins['operand']['addressingMode'] == \"Indirect\": machineCode[i] |= 0x1 << 16\n\n if ins['operand']['addressingMode'] == \"Register\":\n machineCode[i] |= 0x2 << 16\n machineCode[i] |= ins['operand']['offset']\n if 'Rout' in ins['operand']:\n # Clears the bits at the location\n machineCode[i] &= 0xFFFFF0FF\n machineCode[i] |= (ins['operand']['Rin'] & 0xF) << 8\n else:\n # Clears the bits at the location\n machineCode[i] &= 0xFFFF0FFF\n machineCode[i] |= (ins['operand']['Rin'] & 0xF) << 12\n elif ins['operand']['addressingMode'] == \"RegisterOffset\": \n machineCode[i] |= 0x3 << 16\n\n if 'Rout' in ins['operand']:\n # Clears the bits at the location\n machineCode[i] &= 0xFFFF0FFF\n machineCode[i] |= (ins['operand']['Rout'] & 0xF) << 12\n print(machineCode[i])", "def _build_graph(self):\n with tf.variable_scope(name_or_scope = self.name, reuse = self.reuse):\n if self.strides == 1:\n data_proj, data_path = ly.channel_shuffle(self.input)\n # deal data_path with branch_main\n with tf.variable_scope(name_or_scope = 'branch_main_s1', reuse = self.reuse):\n data_path = ly.conv_bn_activation(data_path, self.mid_channels, 1, self.strides, data_format = self.data_format, is_training = self.is_training, _use_bias = False)\n data_path = ly.depthwise_conv_layer(data_path, self.mid_channels, self.ksize, [1, self.strides, self.strides, 1], data_format = self.data_format)\n data_path = ly._bn(data_path, self.data_format, self.is_training)\n data_path = ly.conv_bn_activation(data_path, self.outputs, 1, self.strides, data_format = self.data_format, is_training = self.is_training, 
_use_bias = False)\n return tf.concat((data_proj, data_path), axis = -1)\n else:\n data_proj = self.input\n data_path = self.input\n with tf.variable_scope(name_or_scope = 'branch_main_s2', reuse = self.reuse):\n data_path = ly.conv_bn_activation(data_path, self.mid_channels, 1, 1, data_format = self.data_format, is_training = self.is_training, _use_bias = False)\n data_path = ly.depthwise_conv_layer(data_path, self.mid_channels, self.ksize, [1, self.strides, self.strides, 1], data_format = self.data_format)\n data_path = ly._bn(data_path, self.data_format, self.is_training)\n data_path = ly.conv_bn_activation(data_path, self.outputs, 1, 1, data_format = self.data_format, is_training = self.is_training, _use_bias = False)\n with tf.variable_scope(name_or_scope = 'branch_proj_s2', reuse = self.reuse):\n data_proj = ly.depthwise_conv_layer(data_proj, self.inp, self.ksize, [1, self.strides, self.strides, 1], data_format = self.data_format)\n data_proj = ly._bn(data_proj, self.data_format, self.is_training)\n data_proj = ly.conv_bn_activation(data_proj, self.inp, 1, 1, data_format = self.data_format, is_training = self.is_training, _use_bias = False)\n return tf.concat((data_proj, data_path), axis = -1)", "def _build_graph_general(self): \n\n #Find a canonical coloring scheme\n #Each node has a color that is determined by the non-mapped aspects\n nodecolors=set()\n for nl in self.net.iter_node_layers():\n nodecolors.add(self._slice_node_layer_not_allowed(nl))\n nodecolors_sorted=sorted(list(nodecolors))\n del nodecolors\n self._assert_full_order(nodecolors_sorted)\n self.colormap=dict( ((color,colorid) for colorid,color in enumerate(nodecolors_sorted) ))\n\n #each aux node has a color that is determined by the aspect\n self.auxcolormap=dict( ((auxcolor, auxcolorid+len(self.colormap)) for auxcolorid,auxcolor in enumerate(sorted(self.asp)) ) )\n\n\n #Add the underlying network\n #node-layers:\n for nl in self.net.iter_node_layers():\n nlid=self._get_node_id(nl)\n color=self._slice_node_layer_not_allowed(nl)\n colorid=self.colormap[color]\n self.add_node(nlid,colorid)\n\n #edges between node-layers:\n for nl1 in self.net.iter_node_layers():\n for nl2 in self.net[nl1]:\n nl1id=self._get_node_id(nl1)\n nl2id=self._get_node_id(nl2)\n self.add_link(nl1id,nl2id)\n\n\n #Add the auxiliary nodes and edges\n #add the aux nodes\n for a in self.asp:\n for elayer in self.net.slices[a]:\n auxid=self._get_auxnode_id( (a,elayer) )\n auxcolorid=self.auxcolormap[a]\n self.add_node(auxid,auxcolorid)\n \n #add the aux edges\n for nl in self.net.iter_node_layers():\n for a in self.asp:\n nlid=self._get_node_id(nl)\n auxid=self._get_auxnode_id( (a,nl[a]) )\n self.add_link(nlid,auxid)", "def build_model(self):\n self.G = Generator(self.g_conv_dim)\n self.D = Discriminator(self.d_conv_dim, self.c_dim)\n self.generator = Generator(self.g_conv_dim).train(False)\n\n self.G = nn.DataParallel(self.G)\n self.D = nn.DataParallel(self.D)\n\n # For Adam (Unofficial)\n # self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n # self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])\n\n # For RMSprop(Official)\n self.g_optimizer = torch.optim.RMSprop(self.G.parameters(), lr=0.0001)\n self.d_optimizer = torch.optim.RMSprop(self.D.parameters(), lr=0.0001)\n\n self.accumulate(self.generator, self.G.module, 0)\n # self.print_network(self.G, 'G')\n # self.print_network(self.D, 'D')\n \n self.G.to(self.device)\n self.D.to(self.device)\n 
self.generator.to(self.device)\n\n # weight init\n self.G.apply(self.weights_init)\n self.D.apply(self.weights_init)\n self.generator.apply(self.weights_init)", "def buildNet(inputShape, numUniqueClasses):\n layers = InputLayer((None,) + inputShape[1:4])\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1))\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1), stride= (5,1))\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1))\n layers = ResidualLayer(layers, 1, \n filter_size = (3,1), stride= (3,1))\n layers = NonlinearityLayer(layers, nonlinearity = nonlinearity)\n layers = DropoutLayer(layers,p=.3) \n layers = batch_norm(NNHelpers.LocallyConnected2DLayer(layers,1,(5,1),\n W=He('relu'),\n nonlinearity=nonlinearity)) \n layers = DenseLayer(layers,num_units=numUniqueClasses,\n nonlinearity=linear) \n layers = NonlinearityLayer(layers, nonlinearity=softmax) \n return layers", "def _build_graph(self, train_data, test_data):\n\n # Network for testing / evaluation\n # As before, we define placeholders for the input. These here now can be fed\n # directly, e.g. with a feed_dict created by _evaluation_food\n self.expert_outputs = {m: test_pipeline(test_data[m], self.config['prefixes'][m],\n **self.config)\n for m in self.modalities}\n self.prediction = self._fusion(self.expert_outputs)", "def get_network(network: str, config):\n using_spatial = False # If true input is fed as patches.\n using_attention = False\n patch_return_size = 1\n\n if network == 'cohen':\n model = CohenMLP(seq_len=config.seq_len)\n elif network == 'oksuz_rnn':\n model = OksuzRNN(config.gru, input_size=config.rnn_input_size, hidden_size=config.rnn_hidden_size,\n seq_len=config.seq_len, num_layers=config.rnn_num_layers,\n bidirectional=config.rnn_bidirectional)\n elif network == 'hoppe':\n spatial_pooling = None if config.spatial_pooling.lower() == 'none' else config.spatial_pooling.lower()\n using_spatial = True if spatial_pooling is not None else False\n model = Hoppe(config.gru, input_size=config.rnn_input_size, hidden_size=config.rnn_hidden_size,\n seq_len=config.seq_len, num_layers=config.rnn_num_layers,\n bidirectional=config.rnn_bidirectional, spatial_pooling=spatial_pooling,\n patch_size=config.patch_size)\n elif network == 'rnn_attention':\n using_attention = True\n model = RNNAttention(input_size=config.rnn_input_size, hidden_size=config.rnn_hidden_size,\n batch_size=config.batch_size, seq_len=config.seq_len,\n num_layers=config.rnn_num_layers, bidirectional=config.rnn_bidirectional)\n elif network == 'song':\n using_attention=True\n model = Song(seq_len=config.seq_len)\n elif network == 'soyak':\n using_spatial = True\n patch_return_size = config.patch_size - 2\n model = Soyak(patch_size=config.patch_size, seq_len=config.seq_len)\n elif network == 'patch_size':\n using_spatial = True\n model = PatchSizeTest(seq_len=config.seq_len, patch_size=config.patch_size)\n elif network == 'balsiger':\n using_spatial = True\n model = Balsiger(seq_len=config.seq_len, patch_size=config.patch_size)\n elif network == 'rca_unet':\n using_spatial = True\n patch_return_size = config.patch_size\n using_attention = config.rcab_attention\n model = RCAUNet(seq_len=config.seq_len, patch_size=config.patch_size,\n temporal_features=config.num_temporal_features, attention=config.rcab_attention)\n elif network == 'r2plus1d':\n using_spatial = True\n using_attention = True if config.non_local_level > 0 else False\n model = R2Plus1D(patch_size=config.patch_size, seq_len=config.seq_len, factorise=config.factorise,\n 
dimensionality_reduction_level=config.dimensionality_reduction_level,\n non_local_level=config.non_local_level)\n elif network == 'r1d':\n model = R1D(seq_len=config.seq_len)\n else:\n import sys # Should not be able to reach here as we provide a choice.\n print(\"Invalid network. Exiting...\")\n sys.exit(1)\n\n return model, using_spatial, using_attention, patch_return_size", "def build(self, input_shape):\n node_embed_shape = input_shape.node_embed\n edge_embed_shape = input_shape.edge_embed\n\n with tf.name_scope('node'):\n with tf.name_scope('U'):\n self.U = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.U.build(node_embed_shape)\n\n with tf.name_scope('V'):\n self.V = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.V.build(node_embed_shape)\n\n with tf.name_scope('norm'):\n self.norm_h = {\n \"batch\": tf.keras.layers.BatchNormalization(),\n \"layer\": tf.keras.layers.LayerNormalization()\n }.get(self.normalization, None)\n if self.norm_h:\n self.norm_h.build(node_embed_shape)\n\n with tf.name_scope('edge'):\n with tf.name_scope('A'):\n self.A = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.A.build(edge_embed_shape)\n \n with tf.name_scope('B'):\n self.B = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.B.build(node_embed_shape)\n\n with tf.name_scope('C'):\n self.C = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.C.build(node_embed_shape)\n\n with tf.name_scope('norm'):\n self.norm_e = {\n 'batch': tf.keras.layers.BatchNormalization(),\n 'layer': tf.keras.layers.LayerNormalization(axis=-1)\n }.get(self.normalization, None)\n if self.norm_e:\n self.norm_e.build(edge_embed_shape)\n \n super().build(input_shape)", "def build(pipe_configs):\n libs = {}\n config = pipe_configs.get_config()\n if \"module_connection\" not in config:\n raise RuntimeError('\"module_connection\" is missing')\n if \"input_connection\" not in config:\n raise RuntimeError('\"input_connection\" is missing')\n if \"param_connection\" not in config:\n raise RuntimeError('\"param_connection\" is missing')\n\n mod_n_configs = config[\"module_connection\"]\n config_len = len(mod_n_configs)\n module_string_config = [{} for _ in range(config_len)]\n # Use hardware configurations to build backend modules for each subgraph.\n for ir_mod, mod_config in mod_n_configs.items():\n pipe_config = mod_config[\"pipeline\"].copy()\n mod_idx = pipe_config[\"mod_idx\"]\n dev = mod_config[\"dev\"]\n target = mod_config[\"target\"]\n build_func = relay.build\n # Callers may need to use a customized building function to wrap the pre-building logic\n # and the backend building logic. 
For example, in order to support a backend which only\n # can do \"int8\" computation, the caller may need to merge the \"quantization\" logic\n # into the building logic to creat a customized building function.\n if \"build\" in mod_config and mod_config[\"build\"]:\n build_func = mod_config[\"build\"]\n\n lib = build_func(\n ir_mod,\n target,\n params=mod_config[\"params\"],\n target_host=mod_config[\"target_host\"],\n mod_name=mod_config[\"mod_name\"],\n )\n\n pipe_config[\"dev\"] = f\"{dev.device_type},{dev.device_id}\"\n # Use \"mod_idx\" as the key to create a \"module_connection\" map which is not only\n # for the module index but also for the module connection used to build the pipeline.\n module_string_config[mod_idx] = pipe_config\n libs[mod_idx] = {\n \"lib\": lib,\n \"dev\": dev,\n \"fcompile\": mod_config[\"fcompile\"],\n \"export_cc\": mod_config[\"export_cc\"],\n }\n\n # Creating a text form configuration to record the \"input_connection\" and the\n # \"module_connection\" information. The \"input_connection\" is used to record the\n # map of global input and subgraph input, and the \"module_connection\" is used to\n # record module dependency.\n string_config = {}\n string_config[\"param_connection\"] = config[\"param_connection\"]\n string_config[\"input_connection\"] = config[\"input_connection\"]\n string_config[\"module_connection\"] = module_string_config\n\n return PipelineExecutorFactoryModule(libs, string_config)", "def build(config,\n main_prog,\n startup_prog,\n step_each_epoch=100,\n is_train=True,\n is_distributed=True):\n with paddle.static.program_guard(main_prog, startup_prog):\n with paddle.utils.unique_name.guard():\n mode = \"Train\" if is_train else \"Eval\"\n use_mix = \"batch_transform_ops\" in config[\"DataLoader\"][mode][\n \"dataset\"]\n use_dali = config[\"Global\"].get('use_dali', False)\n feeds = create_feeds(\n config[\"Global\"][\"image_shape\"],\n use_mix=use_mix,\n dtype=\"float32\")\n\n # build model\n # data_format should be assigned in arch-dict\n input_image_channel = config[\"Global\"][\"image_shape\"][\n 0] # default as [3, 224, 224]\n model = build_model(config[\"Arch\"])\n out = model(feeds[\"data\"])\n # end of build model\n\n fetchs = create_fetchs(\n out,\n feeds,\n config[\"Arch\"],\n epsilon=config.get('ls_epsilon'),\n use_mix=use_mix,\n config=config,\n mode=mode)\n lr_scheduler = None\n optimizer = None\n if is_train:\n optimizer, lr_scheduler = build_optimizer(\n config[\"Optimizer\"], config[\"Global\"][\"epochs\"],\n step_each_epoch)\n optimizer = mixed_precision_optimizer(config, optimizer)\n if is_distributed:\n optimizer = dist_optimizer(config, optimizer)\n optimizer.minimize(fetchs['loss'][0])\n return fetchs, lr_scheduler, feeds, optimizer", "def create_base_network(NumberOfFeatures, NumberOfClasses,init_mode='glorot_normal'):\n network = Sequential()\n network.add(Dense(44, activation='sigmoid', kernel_initializer=init_mode,input_dim=NumberOfFeatures))\n# network.add(Dense(22, activation='sigmoid',kernel_initializer=init_mode))\n network.add(Dense(NumberOfClasses, activation='softmax',kernel_initializer=init_mode))\n return network", "def test_build_multiple_ansatze(self):\n # TODO: options for adding ansatze together -- keep parameters the same or define new ones?\n # Get the network components\n data = array([[0, 1], [1, 0]])\n cdata = LabeledCData(data, labels=array([1, 0]))\n encoder = BinaryEncoding(cdata)\n layer1 = ProductAnsatz(2)\n layer2 = ProductAnsatz(2)\n measure = Measurement(2, [0])\n\n # Make the 
network\n qnn = Network([encoder, layer1, layer2, measure], \"2q-qvm\")\n\n # Build a circuit for the network\n net0 = qnn._build(0)\n\n # Checks\n self.assertEqual(type(net0), BaseAnsatz)" ]
[ "0.70051813", "0.66093147", "0.6482595", "0.6386774", "0.62803894", "0.62662494", "0.623352", "0.6173588", "0.6158841", "0.61208886", "0.60760844", "0.6056186", "0.60528004", "0.5953922", "0.59380317", "0.59286124", "0.59246135", "0.5907775", "0.58998877", "0.58750767", "0.5869411", "0.58641815", "0.5825463", "0.582415", "0.5823998", "0.58186793", "0.58110386", "0.5809188", "0.5785144", "0.5782279", "0.5775536", "0.57654166", "0.57654005", "0.575845", "0.5742833", "0.5729312", "0.5714011", "0.56850684", "0.5674727", "0.56698126", "0.5651123", "0.56456953", "0.56439966", "0.56397384", "0.56266326", "0.5601045", "0.5586286", "0.5572867", "0.55626863", "0.5524137", "0.55196935", "0.5515081", "0.55138373", "0.55038995", "0.5500907", "0.5485553", "0.54656994", "0.5464083", "0.54622215", "0.54535204", "0.54467434", "0.5445445", "0.5440301", "0.54291934", "0.5424936", "0.5415132", "0.53967357", "0.53967357", "0.53830194", "0.5379801", "0.53696257", "0.53594756", "0.5354748", "0.5351667", "0.53515786", "0.5350817", "0.5345704", "0.53406006", "0.5338453", "0.533699", "0.5326963", "0.53249913", "0.5319839", "0.5317675", "0.5316891", "0.5314376", "0.5301237", "0.5292368", "0.5289529", "0.52873063", "0.5286105", "0.5280839", "0.5260647", "0.5251288", "0.52482647", "0.5242455", "0.5241666", "0.5240515", "0.523912", "0.5238364", "0.5237786" ]
0.0
-1
Calculate cumulative probability from a list of probabilities
def cum_sum_prob(prob_dict):
    if not math.isclose(sum(prob_dict.values()), 1, rel_tol=1e-3):
        raise ValueError('Input probabilities do not sum to 1.')
    out = []
    cur_sum = 0
    for k, v in prob_dict.items():
        cur_sum += v
        out.append((k, cur_sum))
    return out
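A minimal usage sketch for the function above (the sample probabilities and the import are illustrative assumptions, not part of the original entry):

import math  # cum_sum_prob relies on math.isclose

pdf = {'a': 0.25, 'b': 0.5, 'c': 0.25}
print(cum_sum_prob(pdf))
# Expected (Python 3.7+ dict ordering): [('a', 0.25), ('b', 0.75), ('c', 1.0)]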
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cumulative_probability_distribution(self):\n return list(accumulate(self.probability_distribution()))", "def cumprob(self):\r\n return self.probabilities.cumsum(-1)", "def custom_pdf(self, cum_probs, values):\n rnd_num = random()\n for p in range(len(cum_probs)):\n if rnd_num < cum_probs[p]:\n return values[p]", "def calc_prob(data):\n total = len(data)\n frequencies = sorted(Counter(data).items())\n probabilities = OrderedDict()\n for (key, value) in frequencies:\n probabilities[key] = value / total\n return probabilities", "def compute_cdf(ordered_weights):\n return numpy.cumsum(ordered_weights) - 0.5 * ordered_weights", "def probit(x):\n from tensorflow_probability import distributions\n return distributions.Normal(0, 1).cdf(x)", "def gen_cumulative_probabilities(node_probabilities, node, adjust_scale=False):\n\n if node_probabilities.size == 0:\n raise Exception('No probability values calculated for {}'.format(node))\n\n if not all(type(val) == np.float64 for val in node_probabilities):\n raise TypeError(\n 'Unexpected type encountered in node probability distribution '\n '{}'.format(node_probabilities)\n )\n\n if np.isnan(np.sum(node_probabilities)):\n raise ValueError(\n 'NaN encountered in node probability distribution '\n '{}'.format(node_probabilities)\n )\n\n if adjust_scale is True:\n total = np.sum(node_probabilities)\n node_probabilities = node_probabilities / total\n\n node_cumulative_probabilities = np.full(node_probabilities.shape, np.nan)\n cumulative_probability = 0\n for index, probability in np.ndenumerate(node_probabilities):\n index = index[0] # Numpy array indices are tuples\n cumulative_probability += probability\n node_cumulative_probabilities[index] = cumulative_probability\n\n if round(node_cumulative_probabilities[-1], 4) != 1.0:\n raise OSError.errno(\n 'ERROR: Cumulative probability = {}'.format(node_cumulative_probabilities[-1])\n )\n\n return node_cumulative_probabilities", "def collapse(probability, input_list):\n\n result = int((probability * len(input_list)) + 0.5)\n return min(result, len(input_list) - 1)", "def choice(some_list, probabilities, max_probability=1):\n x = random.uniform(0, max_probability)\n cumulative_probability = 0.0\n\n for item, item_probability in zip(some_list, probabilities):\n cumulative_probability += item_probability\n if x < cumulative_probability: break\n\n return item", "def get_cumulative(self, search_items, csv=False, output_dir=None, extra_param=None):\n\n # Get data from api and create objects\n api_datas = self.call_api(search_items, \"probability\", \"cumulative\", \"property\", extra_param=extra_param)\n product = [ProbabilityCumulative(api_data) for api_data in api_datas]\n\n if csv:\n csv_format.to_csv(product, \"probability\", \"cumulative\", output_dir=output_dir)\n\n logging.info(\"Probability Cumulative Data Ready.\")\n\n return product", "def cumulative_distribution(self):\n\n cum_dd = []\n sum_p = 0\n for k, p in reversed(self.dd):\n sum_p += p\n cum_dd.append((k, sum_p))\n return list(reversed(cum_dd))", "def probDist(probList):\n\tdiscreteDist = []\n\tinvProbList = [] # 1 - probList\n\tnumEvents = len(probList)\n\tfor i in range(len(probList)): # watch out for floating-point rounding errors\n\t\tinvProbList.append(1-probList[i])\n\tpowerSet = []\n\teventList = [i for i in range(numEvents)]\n\t#print(eventList)\n\tfor i in range(numEvents+1):\n\t\tpowerSet.append(list(combinations(eventList,i)))\n\t#print(powerSet)\n\tfor subSet in powerSet: # subSets are grouped according to size, 0 to 
numEvents\n\t\t#print(subSet)\n\t\ttotalProb = 0\n\t\tfor subSubSet in subSet: # subSubSets are tuples (the actual subsets of the powerSet)\n\t\t\t#print(subSubSet)\n\t\t\tprob = 1\n\t\t\tfor ix in range(numEvents):\n\t\t\t\tif ix not in subSubSet:\n\t\t\t\t\t#print(ix,'loss')\n\t\t\t\t\tprob *= invProbList[ix]\n\t\t\t\telse:\n\t\t\t\t\t#print(ix,'win')\n\t\t\t\t\tprob *= probList[ix]\n\t\t\ttotalProb += prob\n\t\tdiscreteDist.append(totalProb)\n\treturn discreteDist", "def condprob(self, occur):\n cond = []\n for i in range(len(occur)):\n cond.append((occur[i] + 1) / (sum(occur) + len(occur)))\n return cond", "def get_cumulative_rewards(rewards, # rewards at each step\n gamma=0.99 # discount for reward\n ):\n\n cumulative_rewards = np.empty_like(rewards)\n cumulative_rewards = cumulative_rewards.astype(float)\n cumulative_rewards[-1] = rewards[-1]\n\n for index in range(len(rewards) - 2, -1, -1):\n discount = cumulative_rewards[index + 1] * gamma\n reward = rewards[index]\n cumulative_rewards[index] = discount + reward\n\n return cumulative_rewards # <array of cumulative rewards>", "def cum_reward(self, reward_list):\n reward = 0.\n for rew in reward_list[::-1]:\n reward += rew * self.gamma\n return reward", "def pareto_distribution(v, p=0.8):\n thr = np.sum(v)*p\n cumsum = 0\n for i, _v in enumerate(v, 1):\n cumsum += _v\n if cumsum >= thr:\n return i * 1.0 / len(v)", "def probability(distances):\n v = [1.0/(d + 1) for d in distances]\n s = sum(v)\n return [i/s for i in v]", "def getProbabilityDistribution(probEnter = 0.8):\n idealTimer = probEnter * 5\n initialProbs = [0.05, 0.05, 0.05, 0.05, 0.05]\n for i in range(5):\n if idealTimer <= i + 1:\n initialProbs[i] = 0.6\n if i == 0:\n initialProbs[1] = 0.2\n initialProbs[2] = 0.1\n elif i == 4:\n initialProbs[3] = 0.2\n initialProbs[2] = 0.1\n else:\n initialProbs[i + 1] = 0.15\n initialProbs[i - 1] = 0.15\n\n return initialProbs", "def _generate_p(self):\n self._values, weights = zip(*self._weights.items())\n cumsum = list(itertools.accumulate(weights))\n total = cumsum[-1]\n self._p = [i / total for i in cumsum]", "def get_cumulative_rewards(rewards, # rewards at each step\r\n gamma=0.99 # discount for reward\r\n ):\r\n cumulative_rewards = []\r\n prev = 0\r\n\r\n for r in reversed(rewards):\r\n prev = r + gamma * prev\r\n cumulative_rewards.append(prev)\r\n cumulative_rewards.reverse()\r\n return cumulative_rewards", "def cumprobs(self, values):\n values = np.asarray(values)\n index = np.searchsorted(self.xs, values, side='right')\n ps = self.ps[index-1]\n ps[values < self.xs[0]] = 0.0\n return ps", "def sample_discrete(probs):\r\n q = np.random.rand()\r\n i = 0\r\n p_sum = 0.0\r\n while p_sum < q:\r\n p_sum += probs[i]\r\n i += 1\r\n return i - 1", "def get_probs(self, a):\n with torch.no_grad():\n probabilities = (np.array(self.priorities) ** a) / sum(np.array(self.priorities) ** a)\n return probabilities", "def int_with_probability(list_of_values):\n sum_of_values = sum(list_of_values)\n\n # pick a random value from 0 to sum\n r = random.randrange(0, sum_of_values)\n new_sum = 0\n\n for item in list_of_values:\n new_sum += item\n if new_sum >= r:\n return item", "def find_probability(problist, listoffive):\n\tprobs = []\n\tfor i in listoffive:\n\t\tprobs.append(problist[i])\n\ttotprob = 1\n\tfor n in probs:\n\t\ttotprob = totprob * n\n\treturn totprob", "def calc_probs(log_p):\n\n N = log_p.shape[0]\n\n log_Z_per_N = np.zeros(shape=(N, 1))\n\n for i in range(N):\n\n log_Z_per_N[i] = log_norm(log_p[i])\n\n log_p_new = log_p - 
log_Z_per_N\n\n p = np.exp(log_p_new)\n\n # log_Z = log_norm(log_p)\n\n # p = np.exp(log_p - log_Z)\n\n return p", "def probability(s, a, b):\r\n return s.cdf(b) - s.cdf(a)", "def conditional_probability(data, attr, cp_table):\n # gets class names for dataframe manipulation\n classes = attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n class0 = classlist[0]\n class1 = classlist[1]\n # number of instances beloning to each class\n nclass0 = cp_table.loc[0, class0].sum()\n nclass1 = cp_table.loc[0, class1].sum()\n total = nclass0 + nclass1\n # all probabilities include a laplace est of 1\n prior0 = (nclass0 + 1) / (total + 2)\n prior1 = (nclass1 + 1) / (total + 2)\n list0 = []\n list1 = []\n for index, row in cp_table.iterrows():\n numattr = len(attr.loc[index, 'vars'])\n numer0 = row[class0] + 1\n numer1 = row[class1] + 1\n denom0 = nclass0 + (1 * numattr)\n denom1 = nclass1 + (1 * numattr)\n cp0 = numer0 / denom0\n cp1 = numer1 / denom1\n list0.append(cp0)\n list1.append(cp1)\n # replacing columns in previous table with cond probs\n del cp_table[class0]\n del cp_table[class1]\n cp_table[class0] = list0\n cp_table[class1] = list1\n \n return cp_table, prior0, prior1", "def value_to_cumulative_prob(value, hp):\n if isinstance(hp, Fixed):\n return 0.5\n if isinstance(hp, Boolean):\n # Center the value in its probability bucket.\n if value:\n return 0.75\n return 0.25\n elif isinstance(hp, Choice):\n ele_prob = 1 / len(hp.values)\n index = hp.values.index(value)\n # Center the value in its probability bucket.\n return (index + 0.5) * ele_prob\n elif isinstance(hp, (Int, Float)):\n sampling = hp.sampling or 'linear'\n if sampling == 'linear':\n return (value - hp.min_value) / (hp.max_value - hp.min_value)\n elif sampling == 'log':\n return (math.log(value / hp.min_value) /\n math.log(hp.max_value / hp.min_value))\n elif sampling == 'reverse_log':\n return (\n 1. 
- math.log((hp.max_value + hp.min_value - value) / hp.min_value) /\n math.log(hp.max_value / hp.min_value))\n else:\n raise ValueError('Unrecognized sampling value: {}'.format(sampling))\n else:\n raise ValueError('Unrecognized HyperParameter type: {}'.format(hp))", "def cumulative_distribution(self, X):\n raise NotImplementedError", "def comp_cum_distribution(xs,norm=True,rank=False,data_range='data',pdf=None):\n cdf = cum_density_func(xs,norm,rank,data_range,pdf)\n max_v = np.max(cdf.values())\n return dict([(k,max_v - cdf[k]) for k in cdf.keys()])", "def _ppndf(cum_prob):\n SPLIT = 0.42\n A0 = 2.5066282388\n A1 = -18.6150006252\n A2 = 41.3911977353\n A3 = -25.4410604963\n B1 = -8.4735109309\n B2 = 23.0833674374\n B3 = -21.0622410182\n B4 = 3.1308290983\n C0 = -2.7871893113\n C1 = -2.2979647913\n C2 = 4.8501412713\n C3 = 2.3212127685\n D1 = 3.5438892476\n D2 = 1.6370678189\n # ====== preprocess ====== #\n cum_prob = np.array(cum_prob)\n eps = np.finfo(cum_prob.dtype).eps\n cum_prob = np.clip(cum_prob, eps, 1 - eps)\n adj_prob = cum_prob - 0.5\n # ====== init ====== #\n R = np.empty_like(cum_prob)\n norm_dev = np.empty_like(cum_prob)\n # ====== transform ====== #\n centerindexes = np.argwhere(np.abs(adj_prob) <= SPLIT).ravel()\n tailindexes = np.argwhere(np.abs(adj_prob) > SPLIT).ravel()\n # do centerstuff first\n R[centerindexes] = adj_prob[centerindexes] * adj_prob[centerindexes]\n norm_dev[centerindexes] = adj_prob[centerindexes] * \\\n (((A3 * R[centerindexes] + A2) * R[centerindexes] + A1) * R[centerindexes] + A0)\n norm_dev[centerindexes] = norm_dev[centerindexes] /\\\n ((((B4 * R[centerindexes] + B3) * R[centerindexes] + B2) * R[centerindexes] + B1) * R[centerindexes] + 1.0)\n #find left and right tails\n right = np.argwhere(cum_prob[tailindexes] > 0.5).ravel()\n left = np.argwhere(cum_prob[tailindexes] < 0.5).ravel()\n # do tail stuff\n R[tailindexes] = cum_prob[tailindexes]\n R[tailindexes[right]] = 1 - cum_prob[tailindexes[right]]\n R[tailindexes] = np.sqrt((-1.0) * np.log(R[tailindexes]))\n norm_dev[tailindexes] = ((\n (C3 * R[tailindexes] + C2) * R[tailindexes] + C1) * R[tailindexes] + C0)\n norm_dev[tailindexes] = norm_dev[tailindexes] / (\n (D2 * R[tailindexes] + D1) * R[tailindexes] + 1.0)\n # swap sign on left tail\n norm_dev[tailindexes[left]] = norm_dev[tailindexes[left]] * -1.0\n return norm_dev", "def find_all_cps(xs, cp_prob=1./250, plot=False):\r\n prior_params = mu0, kappa0, alpha0, beta0 = np.mean(xs), 1., 1.01, 1.\r\n post_params = mu_t, kappa_t, alpha_t, beta_t = map(lambda f: np.array([f]), prior_params)\r\n\r\n T = len(xs)\r\n R, M, V = np.zeros((T, T)), np.zeros((T, T)), np.zeros((T, T))\r\n R[0, 0] = 1\r\n M[0, 0] = mu0\r\n V[0, 0] = xs.var()\r\n\r\n mu_pred, sigma2_pred, dof_pred = compute_t_params(mu_t, kappa_t, alpha_t, beta_t)\r\n for t, x in enumerate(xs[1:], start=1):\r\n pred_prob = np.array([nct(x, m, v, d) for m, v, d in zip(mu_pred, sigma2_pred, dof_pred)])\r\n\r\n R[:t + 1, t] = compute_rt(R[:t, t - 1], pred_prob, cp_prob)\r\n\r\n post_params = mu_t, kappa_t, alpha_t, beta_t = update_params(x, prior_params, post_params)\r\n mu_pred, sigma2_pred, dof_pred = compute_t_params(mu_t, kappa_t, alpha_t, beta_t)\r\n\r\n M[:t + 1, t] = mu_pred\r\n V[:t + 1, t] = compute_t_var(sigma2_pred, dof_pred)\r\n\r\n if plot:\r\n mu_hat = np.sum(M*R, axis=0)\r\n var_hat = np.sum(V*R, axis=0)\r\n plot_results(xs, mu_hat, var_hat)\r\n\r\n return R, M, V", "def probability(cpts, term, obs):\r\n \r\n \r\n # term is a list e.g., ['x_1', '0']\r\n # flip refers to the 
assignment either '0' false or '1' true\r\n flip = term[1]\r\n # the term itself\r\n term = term[0]\r\n # accumulator variable\r\n answer = 0\r\n # this loop locates where in the CPT we're looking\r\n for clause in range(len(cpts)):\r\n if cpts[clause][0] == term:\r\n index = clause\r\n # focus on our term\r\n cpt = cpts[index]\r\n # this loop checks if there are no preconditions\r\n # if not, then we immediately know the probability and can return\r\n for m in range(len(cpt[1])):\r\n if cpt[1][m][-2][1] == '1':\r\n if cpt[1][m][0] == [[]]:\r\n answer = cpt[1][m][-1]\r\n # list of the variables we have observed\r\n have = []\r\n if obs != []:\r\n for k in obs:\r\n have.append(k[0])\r\n # list of variables we need to know in order to calculate the probability\r\n needed = []\r\n for prob in range(len(cpt[1])):\r\n for j in cpt[1][prob][0]:\r\n if j != []:\r\n if j[0] not in needed:\r\n needed.append(j[0])\r\n # conditional logic based on the known variables\r\n for required in needed:\r\n if required not in have:\r\n # deep copy our observations list\r\n obs2 = []\r\n obs3 = []\r\n for observs in obs:\r\n obs2.append(observs)\r\n obs3.append(observs)\r\n # if we need to know a variable but don't have it\r\n # then we allow it to be either 0 or 1\r\n obs3.append([required,'1'])\r\n obs2.append([required,'0'])\r\n # computes probability if the unknown term is true, times \r\n # the probability that the unknown term is true, plus the\r\n # probability if the unknown term is false, times the \r\n # probability that the unknown term is false\r\n answer = (probability(cpts, [term,flip], obs3) * probability(cpts, [required,'1'], obs)) + (probability(cpts, [term,flip], obs2) * (probability(cpts, [required,'0'], obs)))\r\n # this loop looks complicated but all it's doing is finding the correct\r\n # line in the CPT\r\n if cpt[1][prob][-2][1] == '1':\r\n count = 1\r\n for i in range(len(cpt[1][prob][0])):\r\n if cpt[1][prob][0][i] in obs:\r\n count *= 1\r\n else:\r\n count = 0\r\n if count == 1:\r\n answer += cpt[1][prob][-1]\r\n\r\n\r\n # this computes the probability that the term is true, so if we asked \r\n # for the probability that it is false, just return 1 - answer\r\n if flip == '0':\r\n return 1 - answer\r\n return answer", "def discrete_rv(p):\n u = np.random.uniform()\n cdf = np.cumsum(p)\n j = np.searchsorted(cdf, u)\n return j", "def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))", "def probabilities(self):\n raise NotImplementedError", "def prob4():\n\n\n N = 500000\n random_draws = np.random.multivariate_normal(mean = [-1,1], cov =[[1,0],[0,1]], size = N)\n\n h = lambda x: x[0] < -1 and x[1] > 1\n f = lambda x: stats.multivariate_normal(mean = [ 0, 0]).pdf(x)\n g = lambda x: stats.multivariate_normal(mean = [-1, 1]).pdf(x)\n\n probability = [h(random_draws[i]) * f(random_draws[i]) / g(random_draws[i]) for i in range(N)]\n\n return 1./N * np.sum(probability)", "def _cumulative_sum(xs):\r\n cumsum = 0\r\n for x in xs:\r\n cumsum += x\r\n yield cumsum", "def cdf(weights):\r\n\treturn np.cumsum(weights) / sum(weights)", "def get_bprop_cumprod(self):\n cumprod = P.CumProd(exclusive=self.exclusive, reverse=self.reverse)\n cumsum = P.CumSum(exclusive=self.exclusive, reverse=not self.reverse)\n\n def bprop(x, axis, out, dout):\n \"\"\"Grad definition for `Product` operation.\"\"\"\n # This will fails when x contains 0\n prod = cumprod(x, axis)\n out = cumsum(prod * dout, axis)\n return out / x, zeros_like(axis)\n return bprop", "def 
calc_invest(prob):\n if prob == 1.0:\n prob = 0.999\n elif prob == 0.0:\n prob = 0.001\n\n signal = (prob - (1.0 / Consts.NUM_OF_CLASSES)) / ((prob * (1.0 - prob)) ** 0.5)\n\n res = (2 * norm.cdf(signal) - 1)\n\n return res", "def estimate_cumulative_distribution(sample, threshold):\n sample_size = len(sample)\n if sample_size == 0:\n raise ValueError(\"Sample cannot be empty when estimating cumulative distribution\")\n count = 0\n for item in sample:\n if item <= threshold:\n count = count + 1\n return count / sample_size", "def sample_probabilities(pvals: np.ndarray) -> Callable[[], int]:\n\n return Sampler(np.cumsum(pvals))", "def priorProbabilities():\r\n\ttotal = 0.0\r\n\tpos = 0.0\r\n\tneg = 0.0\r\n\r\n\t# Count the amount of positives and negatives in the training data\r\n\tfor item in trainingData:\r\n\t\ttotal += 1\r\n\t\tif item[1] == '0':\r\n\t\t\tpos +=1\r\n\t\tif item[1] == '1':\r\n\t\t\tneg +=1\r\n\t\t\t\r\n\t\t\t\r\n\t# Return the positive and negative probabilities \r\n\tposProb = float(pos / total * 100)\r\n\tnegProb = float(neg / total * 100)\r\n\r\n\t\r\n\t\r\n\treturn posProb, negProb", "def prodi(items: Iterable[float]) -> float:\n p: float = 1\n for n in items:\n p *= n\n return p", "def multinomial_prob(counts, probs):\n return nCkarray(*counts.values) * (probs ** counts).prod()", "def calc_probabilities(applications):\n sum_advantage = sum(app.get_advantage() for app in applications)\n return [app.get_advantage() / sum_advantage for app in applications]", "def probability(prods, prod_dict_As, count_dict):\n for p in prods:\n if p not in prod_dict_As:\n raise Exception(\"Think we cannot make the product {}.\".format(p))\n # Argh, Python, this is a reference!\n #possible_As = prod_dict_As[prods[0]]\n possible_As = set( prod_dict_As[prods[0]] )\n for p in prods[1:]:\n possible_As &= prod_dict_As[p]\n ret = []\n for A in possible_As:\n count = 1\n for p in prods:\n count *= count_dict[(p,A)]\n ret.append((A,count))\n return ret", "def evaluate_probabilities(self, batches):\n total_batches = batches.batches_per_epoch()\n catprobs = []\n for batch in range(total_batches):\n X_batch, y_batch = batches.get_batch()\n feed_dict = {\n self.x: X_batch,\n self.y: y_batch,\n self.keep_prob: 1.0}\n fetch_dict = {\n \"catprobs\": self.categorical_probabilities}\n result = self.session.run(fetch_dict, feed_dict)\n catprobs.append(result[\"catprobs\"])\n catprobs = np.concatenate(catprobs)\n return catprobs", "def probability(series, params):\n\n prob = 1\n\n for result in series:\n\n prob *= params[result]\n\n return prob * params[\"die\"]", "def generate_probabilities(self):\n k = 1\n v= 10\n for g in self.class_probabilities:\n curr_list = self.class_probabilities[g]\n for l in range(0,28):\n for w in range(0,28):\n total = float(curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2])\n curr_list[l][w][0] = (float(curr_list[l][w][0])+k)/(total + k*v) \n curr_list[l][w][1] = (float(curr_list[l][w][1])+k)/(total + k*v)\n curr_list[l][w][2] = (float(curr_list[l][w][2])+k)/(total + k*v)\n curr_list[l][w][3] = curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2]", "def get_bprop_cumsum(self):\n cumsum = P.CumSum(exclusive=self.exclusive, reverse=not self.reverse)\n\n def bprop(x, axis, out, dout):\n return cumsum(dout, axis), zeros_like(axis)\n return bprop", "def sample_list_item(\n x: List[Any],\n probs: Optional[np.ndarray],\n random_state: RandomState\n) -> Any:\n\n if probs is None:\n probs = np.repeat(1 / len(x), len(x))\n\n cdf_y_rand = 
random_state.random_sample()\n\n cum_probs = probs.cumsum()\n final_cum_prob = cum_probs[-1]\n\n if abs(1.0 - final_cum_prob) > 0.0000001:\n raise ValueError(f'Expected cumulative probabilities to sum to 1, but got {final_cum_prob} instead.')\n\n x_i = next(\n i\n for i, cum_prob in enumerate(cum_probs)\n if cdf_y_rand < cum_prob\n )\n\n return x[x_i]", "def CumulativeDistribution(data, nbins, range=None, normed=True, centerbins=False):\n\n # 1) COMPUTE THE DISTRIBUTION OF THE DATA\n ydata, xdata = np.histogram(data, nbins, range, normed)\n\n # 1.1) Compute the cumulative sum of the probability\n ydata = ydata.cumsum()\n\n # 2) RETURN THE RESULTS\n if centerbins:\n dif = 0.5 * (xdata[-1] - xdata[0]) / nbins\n xdata += dif\n\n if normed:\n norm = 1.0 / ydata[-1]\n ydata *= norm\n\n return xdata[:-1], ydata\n\n else:\n return xdata[:-1], ydata", "def discounted_cumulative_gain(ranked_list):\n total_ndcg = 0\n for query in ranked_list:\n relevances = [doc[0] for doc in query[1]]\n dcg = 0\n for i, rel in enumerate(relevances, 1):\n dcg += rel / (math.log(i + 1, 2))\n\n idcg = 0\n for i, rel in enumerate(sorted(relevances, reverse=True), 1):\n idcg += rel / (math.log(i + 1, 2))\n\n total_ndcg += dcg / idcg\n return total_ndcg / len(ranked_list)", "def chance(n, p):\n total = 0.0\n for k in range(n+1):\n total += comb(n, k, exact=False) * p**k * (1-p) ** (n-k)\n return total", "def probability_s(self, s, c):\n return sum([self.get_likelihood(c, w) for w in s]) + self.prior_probability[c]", "def MAP(cpts, obs, terms):\r\n\r\n # a list to store the computed probabilities\r\n all_sums = []\r\n # initialize all terms to false\r\n for value in range(len(terms)):\r\n terms[value] = [terms[value], '0']\r\n search_array = terms + obs\r\n # if all terms are being watched, just call MPE\r\n if len(search_array) == len(cpts):\r\n return MPE(cpts, obs)\r\n # we need to know what terms we aren't interested in so we start with \r\n # or terms and observations and note the variables that appear in CPT but\r\n # not in those\r\n dont_count = []\r\n for var in cpts:\r\n if [var[0], '0'] not in search_array and [var[0], '1'] not in search_array:\r\n dont_count.append(var[0])\r\n terms.append([var[0],'1'])\r\n # sort the terms to ensure correct ordering\r\n terms.sort()\r\n # creates a list of all possible bit strings\r\n # just an easy way to create all possible truth assignments\r\n seq = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=len(terms))]\r\n # loop through all possible truth assignments\r\n for j in range(len(seq)):\r\n # we initialize at probability = 100%\r\n chance = 1\r\n # assign the truth values\r\n for k in range(len(seq[j])):\r\n terms[k][1] = seq[j][k]\r\n # this computes the probability using the chaining rule\r\n for i in range(len(terms)):\r\n new_terms = terms[:-i-1] + obs\r\n new_terms.sort()\r\n chance *= probability(cpts,terms[-i-1], new_terms)\r\n # add the probabilities to our list\r\n all_sums.append(chance)\r\n combine = []\r\n # note all variables which weren't in obs or Vs\r\n for i in dont_count:\r\n combine.append(terms.index([i,'1']))\r\n # this will store the final probabilities\r\n final_array = [0] * len(seq)\r\n # another complicated looking loop, it just serves to combine probabilities\r\n # for example, if we have a CPT with x_1, x_2, x_3, x_4 and we observe \r\n # x_1 to be true and have Vs = [x_3, x_4] then we need to combine the \r\n # probabilities that are the same except for x_2 = true vs false\r\n for loc in combine:\r\n for sequence in 
range(len(seq)):\r\n for alt_sequence in range(sequence+1,len(seq)):\r\n if (seq[sequence][:loc] + seq[sequence][loc+1:]) == (seq[alt_sequence][:loc] + seq[alt_sequence][loc+1:]):\r\n final_array[sequence] = all_sums[sequence] + all_sums[alt_sequence]\r\n\r\n # get the truth assignment for the highest probability\r\n location = seq[final_array.index(max(final_array))]\r\n truth_assignment = []\r\n # place the truth assignment in a more readable fashion\r\n for value in range(len(terms)):\r\n if terms[value] in search_array:\r\n if location[value] == '0':\r\n truth_assignment.append(terms[value][0]+ ' = False')\r\n else:\r\n truth_assignment.append(terms[value][0]+ ' = True')\r\n return (truth_assignment)", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n # So we need to determine for every input state-action pair, what the resulting policy distribution is\n # This means that the input will be a single state and a single action per index. \n # We then need to determine if, according to our policy, the action should be taken (prob=1) \n # or not (prob=0)\n \n # state is a tuple of (player's current sum, dealer's single showing card, boolean for usable ace)\n probs = []\n for index, (state, action) in enumerate(zip(states, actions)):\n chosen_action = self.sample_action(state)\n if action == chosen_action:\n probs.append(1)\n else:\n probs.append(0)\n \n \n return np.array(probs)", "def average_probabilities(probabilities):\n result = sum_probabilities(probabilities)\n occurrences = {}\n for probability in probabilities:\n for key in probability.keys():\n if key not in occurrences.keys():\n occurrences[key] = 0\n occurrences[key] = occurrences[key] + 1\n for key in result.keys():\n result[key] = result[key] / occurrences[key]\n return result", "def chance_points(dice_list):\n return sum(dice_list)", "def probability(freqlst):\n\tproblist = []\n\ttotal = 0\n\ttotes = 0\n\tfor elem in freqlst:\n\t\ttotal = total + elem\n\tfor item in freqlst:\n\t\tprob = item / total\n\t\tproblist.append(prob)\n\tfor la in problist:\n\t\ttotes = totes + la\n\treturn problist", "def initial_probabilities_from_trajectories(n_states, trajectories):\n p = np.zeros(n_states)\n\n for t in trajectories:\n p[t.transitions()[0][0]] += 1.0\n\n return p / len(trajectories)", "def cond_prob(self, token, prev_tokens=()):\n n = self._n\n lambdas = [0] * n\n res = 0\n for i in range(0, n - 1):\n lambdas[i] = (1 - sum(lambdas[:i])) * float(self.count(prev_tokens[i:])) / (\n float(self.count(prev_tokens[i:])) + self._gamma)\n res = res + lambdas[i] * self.sub_cond_prob(token, prev_tokens[i:])\n lambdas[n - 1] = (1 - sum(lambdas))\n assert (sum(lambdas) == 1.0)\n if (self._addone):\n return float(res + lambdas[n - 1] * self.sub_cond_prob_addone(token))\n else:\n return float(res + lambdas[n - 1] * self.sub_cond_prob(token))", "def compute_policy_gradient_causality(triple_pi_0, policy_prob_ratio, grad_log, reward_list):\n with torch.no_grad():\n policy_prob_ratio_list = np.fromiter((policy_prob_ratio[triple_pi_0[i][0]][triple_pi_0[i][1]] for i in range(triple_pi_0.shape[0])), float)\n\n # cumprod it\n policy_prob_cumprod = np.cumprod(policy_prob_ratio_list)\n\n # cumsum it, then reverse the order\n reward_reverse_cumsum = np.cumsum(reward_list[::-1])[::-1]\n\n # along the trajectory, apply each step\n policy_gradient = torch.sum(\n torch.stack(\n [(grad_log[triple_pi_0[i][0]][triple_pi_0[i][1]]*\\\n policy_prob_cumprod[i]*\\\n reward_reverse_cumsum[i])\n for i in range(triple_pi_0.shape[0])]\n ),\n dim = 0)\n\n 
return policy_gradient", "def col_prob(P, X=alph):\n\n return sum([P[x] ** 2 for x in X])", "def _uniform_order_statistic_cdf(i, n, x):\n return betainc(i, n-i+1, x)", "def prob_calculate(self, tokens):\n\n prob = 0\n for x in range(0, len(tokens) - self.order - 1):\n prompt = tuple(tokens[x:x + self.order])\n if prompt in self.transitions:\n next_token = tokens[x + self.order]\n values = self.transitions[prompt]\n prob += (values.count(next_token))/len(values)\n\n return prob", "def _discounted_cumsum(self, rewards, rate=None):\n # HINT1: note that each entry of the output should now be unique,\n # because the summation happens over [t, T] instead of [0, T]\n # HINT2: it is possible to write a vectorized solution, but a solution\n # using a for loop is also fine\n rate = self.gamma if rate is None else rate\n\n rewards = np.array(rewards)\n disounted_return = list(\n accumulate(rewards[::-1], lambda ret, rew: rate * ret + rew))\n disounted_return = np.array(disounted_return)[::-1]\n return disounted_return", "def cumulative_prob_to_value(prob, hp):\n if isinstance(hp, Fixed):\n return hp.value\n elif isinstance(hp, Boolean):\n return bool(prob >= 0.5)\n elif isinstance(hp, Choice):\n ele_prob = 1 / len(hp.values)\n index = math.floor(prob / ele_prob)\n return hp.values[index]\n elif isinstance(hp, (Int, Float)):\n sampling = hp.sampling or 'linear'\n if sampling == 'linear':\n value = prob * (hp.max_value - hp.min_value) + hp.min_value\n elif sampling == 'log':\n value = hp.min_value * math.pow(hp.max_value / hp.min_value, prob)\n elif sampling == 'reverse_log':\n value = (hp.max_value + hp.min_value -\n hp.min_value * math.pow(hp.max_value / hp.min_value, 1 - prob))\n else:\n raise ValueError('Unrecognized sampling value: {}'.format(sampling))\n\n if hp.step is not None:\n values = np.arange(hp.min_value, hp.max_value + 1e-7, step=hp.step)\n closest_index = np.abs(values - value).argmin()\n value = values[closest_index]\n\n if isinstance(hp, Int):\n return int(value)\n return value\n else:\n raise ValueError('Unrecognized HyperParameter type: {}'.format(hp))", "def probability_from_internal(internal_values, constr):\n return internal_values / internal_values.sum()", "def proba_from_log_odds(self, log_odds):\n return (1/(1 + math.exp(log_odds)))", "def calculate_prior_probability(y):\n unique, counts = np.unique(y, return_counts=True)\n u_c = dict(zip(unique, counts))\n instances = len(y)\n for u in u_c:\n u_c[u] = float(u_c[u] / instances)\n return u_c", "def sum_probabilities(probabilities):\n result = {}\n for probability in probabilities:\n for key in probability.keys():\n if key not in result.keys():\n result[key] = 0\n result[key] = result[key] + probability[key]\n return result", "def update_probs(vertexNum, deltaFs, candidateSet, vertexProbs):\n\n sum = 0\n candidateDeltas = 0\n for candidate in candidateSet:\n candidateDeltas = candidateDeltas + deltaFs[candidate]\n\n i = 1\n while i <= vertexNum:\n vertexProb = deltaFs[i] / candidateDeltas\n\n if vertexProb < 0:\n vertexProbs[i] = 0\n else:\n vertexProbs[i] = vertexProb\n\n sum = sum + vertexProbs[i]\n i = i + 1", "def probchoice(V, d, obs=[]):\n\n #d = 0.01\n #obs = []\n #V = array([0., 0., 0.2, 0.2, 0.2, 0.4])\n\n #top = [exp(d*v) for v in V]\n top = exp(V * (1./d))\n\n #print top\n #print dummy\n\n # set the value of any prior observations to zero\n for i in range(len(obs)): top[obs[i][0]] = 0.\n\n bottom = sum(top)\n cp = [t/bottom for t in top]\n\n r = random()\n #print r\n #print cumsum(cp)\n\n return where((1*(r < 
cumsum(cp)))==1)[0][0]\n\n #return sum(1*(random() < cumsum(cp)))-1", "def joint_probabilities_from_transitions(ordered_pitch_types, transition_counts):\n first_pitch_totals = {first_pitch_type: sum(transition_counts[first_pitch_type].values())\n for first_pitch_type in ordered_pitch_types}\n\n total_transitions = sum(first_pitch_totals.values())\n\n markov_rows = []\n joint_probabilities = {}\n\n for first_pitch_type in ordered_pitch_types:\n first_pitch_transitions = transition_counts[first_pitch_type]\n joint_probabilities[first_pitch_type] = {}\n first_pitch_type_probability = float(first_pitch_totals[first_pitch_type]) / total_transitions\n\n second_pitch_total = sum(first_pitch_transitions.values())\n row = [first_pitch_type]\n\n for second_pitch_type in ordered_pitch_types:\n if second_pitch_total == 0:\n second_pitch_conditional_probability = 0\n\n else:\n second_pitch_conditional_probability = \\\n float(first_pitch_transitions[second_pitch_type]) / second_pitch_total\n\n row.append(second_pitch_conditional_probability)\n\n joint_probabilities[first_pitch_type][second_pitch_type] = \\\n first_pitch_type_probability * second_pitch_conditional_probability\n\n markov_rows.append(row)\n\n return joint_probabilities, markov_rows, total_transitions", "def _calculate_tail_probability(self, x, rate):\n return 1 - stats.poisson.cdf(x - 1, rate)", "def _compute_model_prob(self, per_list_logodds):\n with tf.compat.v1.name_scope(name='compute_model_prob'):\n return tf.stop_gradient(\n tf.exp(-self._alpha *\n (per_list_logodds -\n tf.reduce_min(per_list_logodds, axis=2, keepdims=True))))", "def filtro_probs(prediccion,p_min):\n clases = []\n for probabilidad in prediccion:\n if probabilidad[1]>=p_min:\n clases.append(probabilidad)\n else:\n clases.append(\"-\")\n return clases", "def calc_prob_prior(iterations, lam):\n return list(map(lambda x: math.exp(-lam * x), range(iterations)))", "def cum_density_func(xs,norm=True,rank=False,data_range='data',pdf=None):\n if pdf is None:\n pdf = prob_density_func(xs,False,data_range)\n pdfk = sorted(pdf.keys())\n pdfv = map(pdf.get,pdfk)\n if not rank:\n cdfv = np.cumsum(pdfv)\n if norm:\n cdfv = cdfv/np.sum(pdfv)\n else:\n cdfv = np.arange(1,len(pdfk)+1)\n if norm:\n cdfv = cdfv/float((len(pdfk)+1))\n return dict(zip(pdfk,cdfv))", "def get_probs(self, *vars):\n freqs = self.freq_counts([self.data.get_column_view(v)[0] for v in vars], [len(v.values) for v in vars])\n k = np.prod([len(v.values) for v in vars])\n return (freqs + self.alpha) / (np.sum(freqs) + self.alpha*k)", "def h(self, probs):\n\n return np.sum(-p*np.log2(p) if p > 0 else 0 for p in np.nditer(probs))", "def priorProb(self, state):\n actions = []\n for i in range(0, 10):\n actions.append(((i, i+1), random.uniform(0, 1))) \n \n return actions", "def cumulative_sum(self, lis):\n new_list = []\n for i in range(len(lis)):\n if i == 0:\n new_list.append(lis[i])\n else:\n new_list.append(new_list[i-1] + lis[i])\n return new_list", "def selection_profiles_by_chance(true, compare):\n n_neurons, M = true.shape\n probabilities = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n n = np.count_nonzero(true[neuron])\n N = np.count_nonzero(compare[neuron])\n rv = hypergeom(M=M, n=n, N=N)\n\n overlap = np.count_nonzero(true[neuron] * compare[neuron])\n probabilities[neuron] = 1 - rv.cdf(x=overlap)\n\n return probabilities", "def safe_cumprod(x, eps):\n return torch.exp(exclusive_cumsum(torch.log(torch.clamp(x, min=eps, max=1.0))))", "def prob3():\n\n h = lambda x: x > 10\n\n N = 
range(5000,500001, 5000)\n\n estimates = []\n\n for n in N:\n random_draw = np.random.gamma(9, scale = 0.5, size = n)\n\n estimate = 1./n * np.sum(h(random_draw))\n estimates.append(estimate)\n\n # arrayify it\n estimates = np.array(estimates)\n\n m = 1 - stats.gamma(a = 9, scale = 0.5).cdf(10)\n \n y = abs(estimates - m)\n y_2 = abs(prob2() - m)\n\n plt.plot(N,y)\n plt.plot(N,y_2)\n\n plt.show()", "def cond_prob(self, event, context):\n count = self.table[event, context] + self.prior\n norm = self.margin[context] + (self.prior * len(self.alphabet))\n return count / norm", "def _computeCondProb(self, testData, classValue):\n classAttrObj = self._classAttrs[classValue]\n frequencyDict = classAttrObj.frequencyDict\n totalDocsInClass = classAttrObj.totalDocsInClass\n\n result = (totalDocsInClass/self._totalTrainDocs) # P(c)\n # Compute P(t|c) for each t in d\n for word in testData:\n result *= ((frequencyDict.get(word, 0) + 1) / (sum(frequencyDict.values()) + self._sizeOfVocabulary))\n return result", "def _basic_probability(count: int, sequence_total_count: int) -> float:\n return float(count) / sequence_total_count", "def prob_distr(self, x):\n return 1.0/x", "def get_cumulative_distribution(self):\n srt_dists = np.sort(self._distances)\n tol = 1E-3\n for i in range(1, len(srt_dists)):\n while srt_dists[i] - srt_dists[i-1] < tol:\n srt_dists[i] += tol\n return {\"x\": srt_dists-srt_dists[0], \n \"P\": np.linspace(0.0, 1.0, len(self._distances), endpoint=False)}", "def uniform_start_probs(self) -> np.ndarray:\n return np.ones(self.n_states) / self.n_states", "def sdd(events,probs):\n \n import random\n nprobs=[x*1000 for x in probs] #so, here i multiply each float in 'probs' by 1000 and store the products in 'nprobs'\n newlist=[]\n for a in range(len(events)) : #then, in this loop, i create a list (newlist), in which each event appears 1000*its probability times\n b=nprobs[a]\n b=int(b)\n for c in range(b) :\n newlist.append(events[a]) \n return (random.choice(newlist)) #and finally, i ramdonly sample ", "def profit_curve(cost_benefit, predicted_probs, labels):\n n_obs = float(len(labels))\n # Make sure that 1 is going to be one of our thresholds\n maybe_one = [] if 1 in predicted_probs else [1] \n thresholds = maybe_one + sorted(predicted_probs, reverse=True)\n profits = []\n for threshold in thresholds:\n y_predict = predicted_probs >= threshold\n confusion_matrix = standard_confusion_matrix(labels, y_predict)\n threshold_profit = np.sum(confusion_matrix * cost_benefit) / n_obs\n profits.append(threshold_profit)\n return np.array(profits), np.array(thresholds)", "def get_probs(self):\n\t\tprobArray = []\n\t\tfor combination in self.codepool:\n\t\t\tif self.feasible(combination):\n\t\t\t\tprobArray.append(self.get_probability(combination))\n\t\t\telse:\n\t\t\t\tprobArray.append(0)\n\t\tprobArray = np.array(probArray) / np.sum(probArray)\n\t\treturn probArray", "def update_probs(flip,prob_coins,coins):\n if flip == 'H':\n joint_prob_sum = 0\n for x in range(len(prob_coins)):\n joint_prob_sum += (prob_coins[x] * coins[x])\n new_prob_coins = []\n for x in range(len(prob_coins)):\n new_prob_coin = prob_coins[x] * coins[x] / joint_prob_sum\n new_prob_coins.append(new_prob_coin)\n return new_prob_coins\n else:\n joint_prob_sum = 0\n for x in range(len(prob_coins)):\n joint_prob_sum += (prob_coins[x] * (1-coins[x]))\n new_prob_coins = []\n for x in range(len(prob_coins)):\n new_prob_coin = (prob_coins[x] * (1-coins[x])) / joint_prob_sum\n new_prob_coins.append(new_prob_coin)\n return 
new_prob_coins" ]
[ "0.7402393", "0.6922577", "0.6584128", "0.6549101", "0.65145403", "0.63271004", "0.62952936", "0.6243871", "0.6198528", "0.6177253", "0.61713296", "0.6147572", "0.6130507", "0.61138874", "0.610889", "0.60820377", "0.60630465", "0.6000701", "0.59468305", "0.5934774", "0.59281653", "0.59118944", "0.59045416", "0.59000486", "0.5884273", "0.5868584", "0.5859634", "0.5855069", "0.58514565", "0.57851785", "0.5770528", "0.5768314", "0.5755839", "0.5741827", "0.57224494", "0.57212", "0.5718476", "0.57162595", "0.56901485", "0.5689714", "0.5682294", "0.56790096", "0.5671515", "0.56713426", "0.5666429", "0.5659013", "0.5656351", "0.5653249", "0.5649915", "0.56469375", "0.5643264", "0.56413674", "0.5632773", "0.563018", "0.5629546", "0.5611504", "0.56093895", "0.56014943", "0.55996466", "0.55985373", "0.5588139", "0.5584314", "0.55764425", "0.554463", "0.5536638", "0.5532242", "0.552708", "0.5521007", "0.5519818", "0.551132", "0.5509173", "0.5508245", "0.5504808", "0.5497974", "0.54970735", "0.5487898", "0.5486082", "0.5479544", "0.546941", "0.5460613", "0.54563606", "0.54417026", "0.54395956", "0.5436808", "0.54304934", "0.54291826", "0.54284656", "0.5412086", "0.54085", "0.5402774", "0.53978705", "0.53964114", "0.5395017", "0.53934866", "0.53893447", "0.5383367", "0.5374579", "0.5373056", "0.53705454", "0.5370232" ]
0.68047005
2
Select an item at random with a given discrete pdf
def select_item_with_prob(items_prob, n_inst):
    items = []
    for i in range(n_inst):
        pick_prob = np.random.uniform()
        values, probs = zip(*cum_sum_prob(items_prob))
        idx = bisect_left(probs, pick_prob)
        items.append(values[idx])
    return items
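A minimal end-to-end sketch showing how the two functions above work together (the imports, sample pdf, and draw count are illustrative assumptions, not part of the original entry):

import math
from bisect import bisect_left
import numpy as np

pdf = {'a': 0.25, 'b': 0.5, 'c': 0.25}
draws = select_item_with_prob(pdf, n_inst=10000)
# Empirical frequencies should approximate the input pdf.
print({k: draws.count(k) / len(draws) for k in pdf})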
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randomly_drawn_via_pdf_gen_from(self, total_samples: int):\r\n\r\n def func_gen(fit: af.Fit, total_samples: int) -> List[object]:\r\n samples = fit.value(name=\"samples\")\r\n\r\n return [\r\n self.object_via_gen_from(\r\n fit=fit,\r\n galaxies=samples.draw_randomly_via_pdf().galaxies,\r\n )\r\n for i in range(total_samples)\r\n ]\r\n\r\n func = partial(func_gen, total_samples=total_samples)\r\n\r\n return self.aggregator.map(func=func)", "def prob_choice(p):\n \n return np.random.random_sample() < p", "def select_item(items, weights, k):\n x = random.choices(items, weights=weights, k=k)\n return x", "def custom_pdf(self, cum_probs, values):\n rnd_num = random()\n for p in range(len(cum_probs)):\n if rnd_num < cum_probs[p]:\n return values[p]", "def random_sample(prob):\n def _random_sample_xducer(step):\n def _random_sample_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n return step(r, x) if random() < prob else r\n return _random_sample_step\n return _random_sample_xducer", "def Chose_rand():\r\n total_list=list(range(1,467681))\r\n select=13788\r\n random_selected= random.sample(total_list,select)\r\n return (random_selected)", "def get_Sample(self, values, probabilities):\r\n return choices(values,probabilities)\r\n # return np.random.choice(values,p=probabilities)\r", "def sample_discrete(probs):\r\n q = np.random.rand()\r\n i = 0\r\n p_sum = 0.0\r\n while p_sum < q:\r\n p_sum += probs[i]\r\n i += 1\r\n return i - 1", "def get_random_discrete(m, n):\n\n return np.random.choice([-1.0,1.0], size=(m,n))", "def selection(probs):\n # pick 2 parents out of this distribution\n t = [i for i in range(len(probs))]\n draw = choice(t, 2, p=probs, replace=False)\n return draw", "def target_pdf(p, disttype):\n me, cov = target_params(disttype)\n if disttype == 'round' or disttype == 'correlated':\n prob = multivariate_normal.pdf(p, mean=me, cov=cov)\n elif disttype == 'bimodal' or disttype == 'close_bimodal':\n prob0 = multivariate_normal.pdf(p, mean=me[0], cov=cov)\n prob1 = multivariate_normal.pdf(p, mean=me[1], cov=cov)\n prob = max([prob0, prob1]) \n \n return prob", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def random_value(self, selected_vals):\n pass", "def probchoice(V, d, obs=[]):\n\n #d = 0.01\n #obs = []\n #V = array([0., 0., 0.2, 0.2, 0.2, 0.4])\n\n #top = [exp(d*v) for v in V]\n top = exp(V * (1./d))\n\n #print top\n #print dummy\n\n # set the value of any prior observations to zero\n for i in range(len(obs)): top[obs[i][0]] = 0.\n\n bottom = sum(top)\n cp = [t/bottom for t in top]\n\n r = random()\n #print r\n #print cumsum(cp)\n\n return where((1*(r < cumsum(cp)))==1)[0][0]\n\n #return sum(1*(random() < cumsum(cp)))-1", "def touching_choice(self,p):\n choose = random.sample(part,2)\n\n return choose", "def sample_pagerank(corpus, damping_factor, n):\n all_pages = []\n first_sample_prob = random.randint(0, len(corpus) - 1)\n distribution_count = dict()\n\n for u in corpus:\n distribution_count[u] = 0\n all_pages.append(u)\n\n sample = all_pages[first_sample_prob]\n for i in range(n - 1): # n - 1 because first sample was already calculated\n selection_bucket = dict()\n selection_start = 0.0\n sample_distribution = transition_model(corpus, sample, damping_factor)\n sample_prob = random.random()\n for u in sample_distribution:\n floor = selection_start\n ceiling = selection_start + sample_distribution[u]\n selection_start = ceiling\n selection_bucket[u] = [floor, 
ceiling]\n for u in selection_bucket:\n v = selection_bucket[u]\n if v[0] < sample_prob < v[1]:\n sample = u\n distribution_count[u] += 1\n distribution = dict()\n for u in distribution_count:\n distribution[u] = float(distribution_count[u]) / n\n\n return distribution", "def sample(self):\n return self.items[self.np_random.choice(len(self.items))]", "def pick_random(items: Iterable[T]) -> Optional[T]:\n\n # Choose to remember the `i`th element from the stream with probability\n # `1/i`. Then probability of choosing the `i`th element is\n #\n # P(i) = (1 / i) * (1 - 1 / (i + 1)) * (1 - 1 / (i + 2)) * ...\n # ... * (1 - 1 / n) =\n # = (1 / i) * ((i + 1 - 1) / (i + 1)) * ((i + 2 - 1) / (i + 2) * ...\n # ... * ((n - 1) / n) =\n # = (1 / i) * (i / (i + 1)) * ((i + 1) / (i + 2)) * ...\n # ... * ((n - 1) / n) =\n # = 1 / n\n\n cur_item: Optional[T] = None\n\n for i, item in enumerate(items, start=1):\n if random() <= 1 / i:\n cur_item = item\n\n return cur_item", "def touching_choice(self,p):\n\n part = ['head', 'foot1', 'foot2', 'foot3', 'foot4', 'back', 'stomach', 'tail']\n if len(self.select[p]) == 0:\n return random.sample(part,2)\n elif len(self.select[p]) == 1:\n part.remove(self.select[p][0])\n c = random.sample(part,1)\n return [self.select[p][0], c[0]]\n else:\n return random.sample(self.select[p],2)", "def sample_pagerank(corpus, damping_factor, n):\n first_page = random.choice(list(corpus))\n model = transition_model(corpus, first_page, DAMPING)\n\n for i in range(n):\n\n choosen = random.random()\n total = 0\n\n for k, v in model.items():\n total += v\n\n if choosen <= total:\n page = k\n break\n \n model = transition_model(corpus, page, DAMPING)\n \n return model", "def _sample_using_random(\n self,\n p: float = 0.1,\n ):\n return sa.func.random() < p", "def sample(self):\n\n # pick sample type according to probability\n samplers = [\"unif\", \"geo\", \"diverse\"]\n sample_idx = np.random.multinomial(\n 1, [self.unif_prob, self.geo_prob, self.diverse_prob])\n idx = np.argmax(sample_idx)\n sampler = samplers[idx]\n\n if sampler == \"unif\":\n return self.unif_sampler()\n if sampler == \"geo\":\n return self.geo_sampler()\n if sampler == \"diverse\":\n return self.diverse_sampler()", "def random_select(population, lamda):\n fitness_population = []\n for i in population:\n f_i = fitness_function(i, lamda)\n fitness_population.append(f_i)\n pList = selection_probability(fitness_population)\n rnd_indices = np.random.choice(len(population), p=pList)\n choice = population[rnd_indices]\n return choice", "def _randomize_one(p, v):\n if any(p.endswith(s) for s in ('_pd_n', '_pd_nsigma', '_pd_type')):\n return v\n else:\n return np.random.uniform(*parameter_range(p, v))", "def post(self, s):\n return np.random.choice(self.sample_list)", "def get_discrete_distribution():\n random_int = random.randint(1, 4)\n if (random_int == 1) | (random_int == 2):\n return 0\n if random_int == 3:\n return 3\n if random_int == 4:\n return 5\n raise ValueError(\"Unable to generate discrete distribution with \", random_int)", "def choose(raw_freq):\n\t\t# Build a map of accumulated frequencies to words\n\t\tacc = itertools.accumulate(raw_freq.values())\n\t\tlookup = jaraco.collections.RangeMap(zip(acc, raw_freq))\n\n\t\t# choose a random word proportional - to do that, pick a\n\t\t# random index from 1 to the total.\n\t\t_, total = lookup.bounds()\n\t\treturn lookup[random.randint(1, total)]", "def discrete_rv(p):\n u = np.random.uniform()\n cdf = np.cumsum(p)\n j = np.searchsorted(cdf, u)\n return j", "def 
word_selection(dictionary):\n total_sum = 0 # {'two': 4, 'red': 4} total_sum == 8\n cumulative_prob = 0.0\n\n for item in dictionary:\n total_sum += dictionary[item]\n\n random_num = random.uniform(0, 1)\n for value in dictionary:\n cumulative_prob += float(dictionary[value]) / float(total_sum)\n if cumulative_prob >= random_num:\n return value", "def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())", "def prob4():\n\n\n N = 500000\n random_draws = np.random.multivariate_normal(mean = [-1,1], cov =[[1,0],[0,1]], size = N)\n\n h = lambda x: x[0] < -1 and x[1] > 1\n f = lambda x: stats.multivariate_normal(mean = [ 0, 0]).pdf(x)\n g = lambda x: stats.multivariate_normal(mean = [-1, 1]).pdf(x)\n\n probability = [h(random_draws[i]) * f(random_draws[i]) / g(random_draws[i]) for i in range(N)]\n\n return 1./N * np.sum(probability)", "def select_action(self, state):\n\n if state in self.Q:\n prob = self.get_probs(self.Q[state])\n else:\n prob = np.ones(self.nA) / self.nA\n return np.random.choice(np.arange(self.nA), p = prob)", "def getRandom(self) -> int:\n # print(self.ind)\n return choice(self.items)", "def choice(population,weights):\r\n\tassert len(population) == len(weights)\r\n\tcdf_vals=cdf(weights)\r\n\treturn population[bisect.bisect(cdf_vals, random.random())]", "def test_discrete_distribution():\n rng = utils.RandomState(0)\n distribution = dist.DiscreteDistribution(rng)\n with pytest.raises(NotImplementedError):\n distribution.sample([])\n with pytest.raises(NotImplementedError):\n distribution.log_probability([], None)\n with pytest.raises(NotImplementedError):\n distribution.support([])", "def get_random_approx_discrete(m,n):\n\n return np.random.choice([-0.99,0.99], size=(m,n))", "def __init__(self, pdf: Union[Callable[[Union[float, np.ndarray]], Union[float, np.ndarray]], str],\n sample_size: int, seed: float = None, **kwargs):\n super().__init__()\n\n np.random.seed(seed)\n assert callable(pdf) | isinstance(pdf, str), 'Probability density function must be string or callable'\n self.pdf = pdf\n assert isinstance(sample_size, int), 'Sample size has to be specified as an integer'\n self.sample_size: int = np.random.poisson(lam=sample_size, size=1)\n self.inverse_transformation: bool = isinstance(pdf, str)\n self.r_sample: Optional[np.ndarray] = None\n self.z_sample: Optional[np.ndarray] = None\n self.kwargs: dict = kwargs\n\n if not self.inverse_transformation and (\n quad(self.pdf, 0, 1)[0] > 1.0001 or quad(self.pdf, 0, 1)[0] < 0.9999):\n warn('Supplied pdf function is not a proper pdf function as it integrates to {}, running'\n ' normalization'.format(quad(self.pdf, 0, 1)[0]), RuntimeWarning)\n normalize = quad(self.pdf, 0, 1)[0]\n pdf_tmp: Callable = self.pdf\n del self.pdf\n self.pdf = lambda x: pdf_tmp(x) / normalize\n self.pdf = np.vectorize(self.pdf)", "def sample_pagerank(corpus, damping_factor, n):\n probabilities = dict()\n samples = []\n\n # Random first sample\n page = random.choice(list(corpus.keys()))\n samples.append(page)\n \n # Remaining samples after first\n for i in range(n-1):\n p = transition_model(corpus, page, damping_factor)\n page = random.choices(list(p.keys()), weights=list(p.values()), k=1)[0]\n samples.append(page)\n\n # Count\n for p in corpus.keys():\n probabilities[p] = samples.count(p) / n\n\n return probabilities", "def sample(cdf):\n p = rand()\n #this line is for rounding errors which will cause binary_search to return\n #an index that is out of bounds\n if p == 1.0:\n return 
cdf[-1]\n else:\n return binary_search(cdf, p)", "def pick(self, target: int) -> int:\n\t\tans = None\n cnt = 0\n for i, x in enumerate(self.nums): \n if x == target: \n cnt += 1\n if randint(1, cnt) == cnt: ans = i # prob 1/cnt\n return ans", "def sample_discrete_pmf(X, PM, N):\n\n assert np.isclose(np.sum(PM), 1.0)\n assert all(0.0 <= p <= 1.0 for p in PM)\n \n y = np.zeros(N)\n cumulativePM = np.cumsum(PM) # build CDF based on PMF\n offsetRand = np.random.uniform(0, 1) * (1 / N) # offset to circumvent numerical issues with cumulativePM\n comb = np.arange(offsetRand, 1 + offsetRand, 1 / N) # new axis with N values in the range ]0,1[\n \n j = 0\n for i in range(0, N):\n while comb[i] >= cumulativePM[j]: # map the linear distributed values comb according to the CDF\n j += 1\t\n y[i] = X[j]\n \n return rd.permutation(y) # permutation of all samples", "def sample(self, probabilities):\n return self.sample_bernoulli(probabilities)", "def sel_random(individuals, size, replacement=False):\r\n if extra.is_numpy(individuals):\r\n return [np.random.choice(individuals, replace=replacement) for _ in range(size)]\r\n else:\r\n if replacement:\r\n return random.choices(individuals, k=size)\r\n else:\r\n return random.sample(individuals, k=size)", "def plot_random_sample(pattern, num_to_select, row_no, col_no, c_map=\"viridis\"):\n mpl.rc(\"image\", cmap=c_map)\n all_images = get_image_paths(pattern)\n sampled_img = get_rand_img(num_to_select, all_images)\n plot_grid(row_no, col_no, sampled_img)", "def sample(self):\n return gc.rand_state.choice(self.domain)", "def sample_from(self, p):\n return np.searchsorted(np.cumsum(p), np.random.rand())", "def choose_number_random_category(self):\n self.view.choose_number_random_category()", "def p_selection(p_init, it, n_iters):\n it = int(it / n_iters * 10000)\n\n if 10 < it <= 50:\n p = p_init / 2\n elif 50 < it <= 200:\n p = p_init / 4\n elif 200 < it <= 500:\n p = p_init / 8\n elif 500 < it <= 1000:\n p = p_init / 16\n elif 1000 < it <= 2000:\n p = p_init / 32\n elif 2000 < it <= 4000:\n p = p_init / 64\n elif 4000 < it <= 6000:\n p = p_init / 128\n elif 6000 < it <= 8000:\n p = p_init / 256\n elif 8000 < it <= 10000:\n p = p_init / 512\n else:\n p = p_init\n\n return p", "def weightedrandomchoice(items): # {{{2\n total = 0\n items.sort(reverse=True, key=lambda x:x[0])\n for item in items:\n total += item[0]\n threshold = random.uniform(0, 0.6) * total\n for item in items:\n threshold -= item[0]\n if threshold <= 0:\n return item[1]", "def lf():\n return random.sample(font_list, 25)", "def bernoulli_trial(p: float) -> int:\n return 1 if random.random() < p else 0", "def _sample_pos(self, assign_result, num_expected, **kwargs):\n pos_inds = torch.nonzero(assign_result.gt_inds > 0)\n if pos_inds.numel() != 0:\n pos_inds = pos_inds.squeeze(1)\n if pos_inds.numel() <= num_expected:\n repeat_ = num_expected // pos_inds.numel()\n return torch.cat((pos_inds.repeat(repeat_), self.random_choice(pos_inds, num_expected % pos_inds.numel())))\n else:\n return self.random_choice(pos_inds, num_expected)", "def selected_choice(self):\r\n choice = zeros(self.num_agents)\r\n random_numbers = self.generate_random_numbers()\r\n\r\n self.prob_cumsum = self.cumprob().filled(-1)\r\n\r\n for i in range(self.num_choices):\r\n # Indicator for the zero cells in the choice array\r\n #indicator_zero_cells = ones(self.num_agents)\r\n indicator = array([True]*self.num_agents)\r\n\r\n zero_indices = choice == 0\r\n #indicator_zero_cells[~zero_indices] = ma.masked\r\n 
indicator[~zero_indices] = False\r\n\r\n # Indicator for the cells where the random number\r\n # is less than the probability\r\n #indicator_less_cells = ones(self.num_agents)\r\n #indicator_less_cells = array([True]*self.num_agents)\r\n less_indices = random_numbers < self.prob_cumsum[:,i]\r\n #indicator_less_cells[~less_indices] = ma.masked\r\n #indicator_less_cells\r\n indicator[~less_indices] = False\r\n\r\n\r\n #indicator_less_zero_cells = indicator_zero_cells + indicator_less_cells\r\n\r\n #indicator_less_zero_cells = indicator_less_zero_cells == 2\r\n\r\n choice[indicator] = i + 1\r\n\r\n choice.shape = (self.num_agents, 1)\r\n\r\n #alt_text = []\r\n #for i in choice:\r\n # alt_text.append(self.choices[int(i[0])-1])\r\n #alt_text = array(alt_text)\r\n #alt_text.shape = (self.num_agents, 1)\r\n\r\n #return alt_text\r\n #print choice\r\n return DataArray(choice, ['selected choice'])", "def sample(a, p):\n if (len(a) != len(p)):\n raise Exception('a != p')\n p = np.array(p)\n p = p / p.sum()\n r = random.random()\n n = len(a)\n total = 0 # range: [0,1]\n for i in xrange(n):\n total += p[i]\n if total > r:\n return a[i]\n return a[i]", "def easy_sample(self, num, **kwargs):\n return self.preprocess(self.sample(num, **kwargs), **kwargs)", "def _gen_pert(self, count, **kwargs):\n self._check_pert(**kwargs)\n pert = FairBetaPert(**kwargs)\n rvs = pert.random_variates(count)\n return rvs", "def generate_products(self = random.sample, name = random.choice(result), price = random.randint(5, 100), weight = random.randint(5, 100), \nflammability= random.uniform(0, 2.5)):\n return sample", "def uniform_selection(random, population, args):\r\n num_selected = args.setdefault('num_selected', 1)\r\n selected = []\r\n for _ in range(num_selected):\r\n selected.append(population[random.randint(0, len(population)-1)])\r\n return selected", "def test_sampling(distribution):\n print(\"TESTING: sampling for %s distribution\" % distribution)\n params = dist.DISTRIBUTIONS[distribution][dist.KEY_TEST_PARAMS]\n print(\" input parameters: %s\" % dist.get_params(params, distribution))\n print(\" creating pdf\")\n test_pmf = dist.pmf(distribution, params)\n print(\" generating samples\")\n test_sample_pmf = dist.get_sample_pmf(dist.samples(distribution, params))\n test_distribution = []\n for i in range(len(test_pmf)):\n if i < len(test_sample_pmf):\n test_distribution.append([i, test_pmf[i], test_sample_pmf[i]])\n else:\n test_distribution.append([i, test_pmf[i], 0.0])\n test_output = 'TEST-SAMPLE.CSV'\n print(\" printing results in %s\" % test_output)\n utils.print_csv(test_output, ['value', 'probability', 'relative_frequency'], test_distribution)", "def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]", "def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]", "def test_random_selection(values, ignore_freq):\n # The sequence of selected values when selecting from a list with 10 elements\n # and a seed of 42 is 1, 0, 4, 3, ...\n r = RandomSelect(seed=42, ignore_freq=ignore_freq)\n # The returned constant function is expected to return 1 for all arguments.\n f = r.prepare(values)\n assert f(0) == 1\n assert f(1) == 1\n # The returned constant function is expected to return 1 for all arguments.\n f = r.prepare(values)\n assert f(0) == 0\n assert f(1) == 0", "def my_random_choice(choices):\n def getKey(x):\n return x[0] + 0.001 * x[1]\n\n return min(choices, key=getKey)\n\n # for 
actual random selection, we may replace the above with this:\n #return random.choice(choices)", "def sample_pagerank(corpus, damping_factor, n):\n samples = list()\n pagerank = dict()\n\n # First sample generated by choosing from a page at random\n samples.append(random.choice(list(corpus)))\n\n for _ in range(n - 1):\n distribution = transition_model(corpus, samples[-1], damping_factor)\n\n # random.choices returns a list, so we exctract needed string from it\n sample = random.choices(list(corpus), weights=[distribution[page] for page in distribution], k=1)[0]\n samples.append(sample)\n\n # Dict w/ how many times each page been sampled\n sampling_result = Counter(samples)\n\n # Get the probability of link being clicked\n for link in corpus:\n pagerank[link] = sampling_result[link] / n\n\n # Sorting pagerank by keys\n pagerank = dict(sorted(pagerank.items()))\n \n return pagerank", "def sample_pagerank(corpus, damping_factor, n):\n data = []\n\n #Choosing a random page from the corpus and adding to data\n sample = random.choice(list(corpus.keys()))\n data.append(sample)\n\n for _ in range(n-1):\n prob_distrib = transition_model(corpus, sample, damping_factor)\n\n #Choosing a page from the corpus based on transition model and adding to data\n sample = np.random.choice(list(prob_distrib.keys()), p=list(prob_distrib.values()))\n data.append(sample)\n\n #Dividing the number of times each page was visited by numebr of samples \n pagerank = {k : v/n for k, v in Counter(data).items()}\n\n return pagerank", "def probability(prob):\n return random.random() <= prob", "def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b / np.sum(b, 1)[:, None]", "def _select(self):\n sel = []\n\n # choose randomly while favouring fit individuals\n lp = len(self.population) // 2\n for _ in range(lp):\n idx1 = idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n while idx1 == idx2:\n idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n\n p1, p2 = self.population[idx1], self.population[idx2]\n sel.append((p1, p2))\n\n return sel", "def draw_bs_sample(data):\n return rg.choice(data, size=len(data))", "def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks']\n choices = ((0, 1, 2), (0, 2, 1), (1, 0, 2),\n (1, 2, 0), (2, 1, 0), (2, 0, 1))\n p = random.random()\n if p <= 0.5:\n idx = random.randint(0, 5)\n swap = choices[idx]\n image = image[:, :, swap]\n return {'image': image,\n 'landmarks': landmarks}", "def draw_random_sample(choices, probabilities, n):\n values = np.array(range(len(choices)))\n probs = np.array(probabilities)\n bins = np.add.accumulate(probs)\n inds = values[np.digitize(random_sample(n), bins)]\n samples = []\n for i in inds:\n samples.append(deepcopy(choices[int(i)]))\n return samples", "def bernoulli(p):\r\n if np.random.random() < p:\r\n return 0\r\n else:\r\n return 1", "def __call__(self):\n return random.choice(self.fakers)", "def direct_sample(self, trial_count):\n count = 0\n\n for i in xrange(trial_count):\n values = {}\n\n for letter in self.letters:\n prob = self.variables[letter].get_prob(values)\n values[letter] = self.sample(prob)\n\n if values[self.query.variable]:\n count += 1\n\n return float(count) / trial_count", "def pdf(self,x):\n return self.categoricalDist.pdf(x)", "def _sample(preds, temperature=1.0):\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return 
np.argmax(probas)", "def choose_option(self, state):\n options = [o for o in self.options if o.initiation_set[state] == 1]\n return random.choice(options)", "def sample_control(Pi, t):\n\n uvec, pvec = zip(*[(pi[t], pval) for pi, pval in Pi.items()\n if len(pi) > t])\n pvec = np.array(pvec) / sum(pvec)\n u = np.random.choice(uvec, p=pvec)\n\n return u", "def sel_random(\n l: list,\n f: int = None,\n seed: int = None,\n ) -> list:\n\n # Initialisations\n sel = []\n l_temp = l[:]\n\n # Check if how many numbers to select is not zero\n if f is not None:\n\n # Determine how many numbers to select from the given number\n n_sel = f\n\n else:\n\n if seed is not None:\n\n numpy.random.seed(seed = seed)\n\n # Determine how many numbers to select from the given list\n n_sel = numpy.random.randint(low = 1, high = len(l_temp))\n\n # Loop through the amount of numbers to be selected\n for i in range(0, n_sel):\n\n if seed is not None:\n\n numpy.random.seed(seed = seed + i)\n \n # Select a random number from the list of numbers\n sel.append(numpy.random.choice(numpy.asarray(l_temp)))\n\n # Remove the selected number from the list of numbers to prevent the same number from being selected more than once\n l_temp.remove(sel[i])\n\n # Sort the list of selected numbers\n sel.sort()\n\n return sel", "def random():\r\n return R.NextDouble()", "def _random_subset(seq,m):\n targets=random.sample(seq,m)\n return targets", "def choose(self):\n\n i = bisect.bisect(self._p, random.random())\n return self._values[i]", "def sample_from(space):\n distrs = {\n 'choice': choice,\n 'randint': randint,\n 'uniform': uniform,\n 'normal': normal,\n }\n s = space[0]\n\n np.random.seed(int(time.time() + np.random.randint(0, 300)))\n\n log = s.startswith('log_')\n s = s[len('log_'):] if log else s\n\n quantized = s.startswith('q')\n s = s[1:] if quantized else s\n\n distr = distrs[s]\n if s == 'choice':\n return distr(space[1])\n samp = distr(space[1], space[2])\n if log:\n samp = np.exp(samp)\n if quantized:\n samp = round((samp / space[3]) * space[3])\n return samp", "def choice(seq):\r\n i = int(random() * len(seq))\r\n return seq[i]", "def uniform_select(xs):\n n = len(xs) - 1\n i = randint(0, n)\n return xs[i]", "def choice(some_list, probabilities, max_probability=1):\n x = random.uniform(0, max_probability)\n cumulative_probability = 0.0\n\n for item, item_probability in zip(some_list, probabilities):\n cumulative_probability += item_probability\n if x < cumulative_probability: break\n\n return item", "def selection(self):\n\n # sort the generation according to fitness.\n self.sortByFitness()\n # get the fitness sum.\n fitnessSum = 0\n for outfit in self.currentGeneration:\n fitnessSum += self.applyFitness(outfit)\n # generate a random number\n stop = random.uniform(0, 1)\n accumulated = 0\n offset = 0\n for outfit in self.currentGenerationSorted:\n fitness = self.applyFitness(outfit) + offset\n probability = fitness / fitnessSum\n accumulated += probability\n\n if stop <= accumulated:\n return outfit", "def select_action(self, state):\r\n policy_s = self.epsilon_greedy_probs(self.nA, self.Q[state], self.count, self.epsilon)\r\n return np.random.choice(np.arange(self.nA), p=policy_s)", "def mh(N, disttype):\n xs = np.array([])\n ys = np.array([])\n pos_now = (0,0)\n accept = 0\n for i in range(N):\n pos_cand = proposal_pdf(pos_now)\n prob_stay = target_pdf(pos_now, disttype)\n prob_move = target_pdf(pos_cand, disttype)\n if prob_move / prob_stay > np.random.uniform(0,1,1):\n pos_now = pos_cand\n xs = np.append(xs, 
pos_now[0])\n ys = np.append(ys, pos_now[1])\n accept += 1\n return xs, ys, accept/N", "def probability(p):\n return p > random.uniform(0.0, 1.0)", "def selectRandomFromDict(ddata):\n\tdkeys = list(ddata.keys())\n\tdk = selectRandomFromList(dkeys)\n\tel = (dk, ddata[dk])\n\treturn el", "def get_random_individual():\r\n return [ random.random() for _ in range(PARAMETERS_COUNT) ]", "def set_generator(random, args):\n representation = args.get('representation')\n indices = list(range(len(representation)))\n max_size = args.get('max_size', 9)\n variable_size = args.get('variable_size', True)\n if variable_size and max_size > 1:\n size = random.randint(1, max_size)\n else:\n size = max_size\n candidate = random.sample(indices, size)\n return sorted(candidate)", "def randomize(self):\n random_pfm = [[c for c in row] for row in self.pfm]\n random.shuffle(random_pfm)\n m = Motif(pfm=random_pfm)\n m.id = \"random\"\n return m", "def next_state(self):\n \n self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=self.tr_array_dict[f'{self.state}'])", "def random_element(self) -> 'PFElement':\n return random.choice(list(iter(self)))", "def rnd_choice(start, end, step, output_type=float):\n nums = np.append(np.arange(start, end, step), end)\n return output_type(np.random.choice(nums))", "def random_entry(): \n\n files = list_entries()\n return random.choice(files)", "def draw(self, nsamples):\n \n if self.dist == 'normal':\n mean = self.mean\n sd = self.sd\n self.sample = sd * np.random.randn(nsamples) + mean\n \n elif self.dist == 'poisson':\n lam = self.lam\n self.sample = np.random.poisson(lam, size=nsamples)\n \n elif self.dist == 'binomial':\n n = self.n\n p = self.p\n self.sample = np.random.binomial(n, p, size=nsamples)\n \n else:\n print('dist must be normal, poisson or binomial')", "def sample_response(self, slate_p):\n slate_p[slate_p >= 0.5] = 1.0\n slate_p[slate_p < 0.5] = 0.0\n# m = Bernoulli(slate_p)\n# return m.sample()\n return slate_p" ]
[ "0.6026738", "0.597825", "0.5953137", "0.5908189", "0.58714074", "0.5860779", "0.58515704", "0.58300185", "0.57529527", "0.57335967", "0.5725942", "0.5704148", "0.5658167", "0.565592", "0.5649849", "0.56484205", "0.5635234", "0.5629565", "0.56148976", "0.561193", "0.56036764", "0.55895513", "0.5586644", "0.5568846", "0.5561811", "0.5550518", "0.550986", "0.5508334", "0.55028427", "0.549067", "0.54860777", "0.5477316", "0.5450163", "0.54501575", "0.5448134", "0.5445222", "0.5442836", "0.5436179", "0.54351616", "0.5412445", "0.53939486", "0.5392807", "0.5392215", "0.5386629", "0.53828436", "0.53719074", "0.5371697", "0.53507096", "0.5341504", "0.53384846", "0.53284824", "0.5317308", "0.53155345", "0.53051394", "0.52966297", "0.52849793", "0.527606", "0.5273362", "0.526929", "0.5267851", "0.5267851", "0.52656054", "0.5264098", "0.52626854", "0.5256021", "0.5253998", "0.5253566", "0.5252906", "0.5251549", "0.52428085", "0.52341855", "0.5225907", "0.52065974", "0.5201863", "0.5201663", "0.5199099", "0.51878864", "0.51871586", "0.5186591", "0.5186076", "0.51790017", "0.517651", "0.5172334", "0.5168829", "0.5166552", "0.5161243", "0.51569307", "0.5151433", "0.5147932", "0.51432514", "0.514268", "0.5107258", "0.51052237", "0.510362", "0.5103174", "0.5098243", "0.5097299", "0.50958353", "0.5089464", "0.50818205" ]
0.58877283
4
Normalize the data using zscore
def normalize(ref_df_col, df_col):
    col_mean = ref_df_col.mean()
    col_std = ref_df_col.std()
    ref_df_norm_col = (ref_df_col - col_mean) / col_std
    df_norm_col = (df_col - col_mean) / col_std
    return ref_df_norm_col, df_norm_col
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(X):\n # z-score\n mean = np.mean(X, axis=(0, 1, 2, 3))\n std = np.std(X, axis=(0, 1, 2, 3))\n # avoid dividing zero by adding a very small number\n X = (X - mean) / (std + 1e-7)\n\n return X", "def z_score_norm(data: np.ndarray) -> np.ndarray:\n mean = np.mean(data)\n std = np.std(data)\n normalized = (data - mean) / std\n\n # Z-score normalization's result has zero-mean.\n # nan values should be replaced as zero\n normalized[np.isnan(normalized)] = 0\n return normalized", "def z_score_normalization(data):\n # import data\n\n features = data[:, 0:-1]\n target = data[:, -1]\n\n # First 10 rows\n print('Training Data:\\n\\n' + str(features))\n print('\\n')\n print('Targets:\\n\\n' + str(target))\n\n # Data standarization\n standardized_data = preprocessing.scale(features)\n\n # First 10 rows of new feature vector\n print('\\nNew feature vector:\\n')\n print(standardized_data[:10])\n print('\\n\\n')\n\n new_data = np.append(standardized_data, target.reshape(target.shape[0], -1), axis=1)\n print('\\nNew array\\n')\n print(new_data)\n\n return new_data", "def z_score(x: np.ndarray) -> np.ndarray:\n return (x - np.mean(x)) / np.std(x)", "def z_normalize(ts):\n\n ts -= np.mean(ts)\n std = np.std(ts)\n\n if std == 0:\n raise ValueError(\"The Standard Deviation cannot be zero\")\n\n #ts /= std\n return ts / std", "def z_score_normalization(feature_vector):\n\n # Data standardization\n standardized_data = preprocessing.scale(feature_vector)\n\n # First 10 rows of new feature vector\n logger.debug('New feature vector: %s', standardized_data[:10])\n\n return standardized_data", "def z_score(self, x):\n\n mean = self.mean\n stddev = self.stddev\n\n z = (x - mean) / stddev\n\n return z", "def Test():\n x=np.array([[4,-100],[1,50],[4,50]])\n x_norm=z_score(x)\n print(x_norm)\n return", "def modified_z_score(x: np.ndarray) -> np.ndarray:\n return 0.6745 * (x - np.median(x)) / median_absolute_deviation(x)", "def z_score(num, mean, std_dev):\n\treturn (num - mean) / std_dev", "def z_score_std(train, test):\n scalers = {}\n for i, sample in enumerate(train):\n scalers[i] = StandardScaler()\n train[i] = scalers[i].fit_transform(sample)\n\n for i, sample in enumerate(test):\n test[i] = scalers[i].transform(sample)\n\n return train, test", "def normalize(row):\n study = row['study']\n val = row[key]\n group_mean = df.groupby('study').mean().loc[study,key]\n group_std = df.groupby('study').std().loc[study,key]\n zval = (val - group_mean) / group_std\n return zval", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(self, data):\n self.find_mean_std(data)\n return (data - self._data_mean) / self._data_std", "def z_score(raw_score):\n array = pd.Series(raw_score)\n\n mean = array.mean()\n sd = array.std(ddof=0)\n\n Z = (array-mean)/sd\n\n return(list(Z))", "def get_zscore_data(self):\n self.update_filter_inds()\n return _z_score(self)", "def zscore(vals):", "def normalize_data(data):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / std", "def standardize(data):\r\n mean = data.mean(axis=0)\r\n std = data.std(axis=0)\r\n return (data - mean)/std", "def _get_z_score(self, d):\n z = (d - self._mean) / self._std\n return z", "def standardize(column):\n # Finish the function so that it returns the z-scores\n\n z_score = (column - column.mean()) / column.std()\n return z_score", "def make_zscores(relfreqs, means, 
stdevs):\n normalized = relfreqs.sub(means, axis=\"index\")\n save_dataframe(normalized, \"2-normalized.csv\")\n #print(normalized.head())\n zscores = normalized.div(stdevs, axis=\"index\")\n save_dataframe(zscores, \"3-zscores.csv\")\n #print(zscores.head())\n return zscores", "def standardize(column):\n # Finish the function so that it returns the z-scores\n z_score = (column - column.mean()) / column.std()\n return z_score", "def z_score_transformation(data, numeric_list):\n\n transformed_data = data[numeric_list].apply(stats.zscore())\n\n return transformed_data", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def z_score(self) -> float:\n return float((self.tsdf.pct_change().iloc[-1] - self.tsdf.pct_change().mean()) / self.tsdf.pct_change().std())", "def zscore(time_series, axis=-1):\r\n time_series = np.asarray(time_series)\r\n et = time_series.mean(axis=axis)\r\n st = time_series.std(axis=axis)\r\n sl = [slice(None)] * len(time_series.shape)\r\n sl[axis] = np.newaxis\r\n zt = time_series - et[sl]\r\n zt /= st[sl]\r\n return zt", "def normalize(data):\n\n p_means = np.mean(data,axis=0)\n p_vars = np.var(data,axis=0)\n\n # subtract dc component\n data = data-p_means\n\n # contrast normalize \n data = data/np.sqrt(p_vars+10) # plus 10 to account for small variances\n \n return data", "def normalize(data):\n data_range = data.max() - data.min()\n #if data_range == 0.:\n # sys.exit(\"data.max() - data.min() == 0. !\")\n if stddev != 0.:\n data = (data - data.min()) / data_range\n\n return data", "def normalize(cls, raw_score):\n return super().normalize(raw_score)", "def normalize(cls, raw_score):\n return super().normalize(raw_score)", "def normalize(cls, raw_score):\n return super().normalize(raw_score)", "def normalize(cls, raw_score):\n return super().normalize(raw_score)", "def normalize_zmuv(x, axis=0, epsilon=1e-9):\n mean = x.mean(axis=axis)\n std = np.sqrt(x.var(axis=axis) + epsilon)\n return (x - mean[np.newaxis,:]) / std[np.newaxis,:]", "def zscore_pupil(self, dtype = 'bp_filt_pupil'):\r\n\r\n exec('self.' + str(dtype) + '_zscore = (self.' + str(dtype) + ' - np.mean(self.' + str(dtype) + ')) / np.std(self.' + str(dtype) + ')')", "def zscore(data: Tensor, epsilon: float = 1e-7) -> Tensor:\n if tf.is_tensor(data):\n data = tf.cast(data, tf.float32)\n mean = tf.reduce_mean(data)\n std = tf.keras.backend.std(data)\n return (data - mean) / tf.maximum(std, epsilon)\n elif isinstance(data, torch.Tensor):\n data = data.type(torch.float32)\n mean = torch.mean(data)\n std = torch.std(data, unbiased=False)\n return (data - mean) / torch.max(std, torch.tensor(epsilon))\n elif isinstance(data, np.ndarray):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / max(std, epsilon)\n else:\n raise ValueError(\"Unrecognized data type {}\".format(type(data)))", "def z_score(self, x):\n return (x - self.n) / self.p", "def standardize(data):\n stddev = data.std()\n #if stddev == 0.:\n # sys.exit(\"data.std() == 0. 
!\")\n if stddev != 0.:\n data = (data - data.mean()) / (data.std())\n\n return data", "def make_normalize_score():\n\n def normalize_score(data):\n score = data['score']\n norm_score = (score - 24.3) / 166.7\n\n return {**data,\n 'normalized_score': norm_score}\n\n return normalize_score", "def __calculateNormalizedScores(self):\n year_scores = {0 : []}\n for venue in self.venue_scores:\n v_scores = []\n for year in self.venue_scores[venue]:\n v_scores.append(self.venue_scores[venue][year])\n if year not in year_scores:\n year_scores[year] = []\n year_scores[year].append(self.venue_scores[venue][year])\n x_year = np.average(np.array(v_scores))\n self.venue_scores[venue][0] = x_year\n year_scores[0].append(x_year)\n \n ##for standardization\n #year_metrics = {x : (np.average(np.array(year_scores[x])), np.std(np.array(year_scores[x]))) for x in year_scores}\n ##for normalization\n year_metrics = {x: (max(year_scores[x]), min(year_scores[x])) for x in year_scores}\n \n #print year_metrics\n \n for venue in self.venue_scores:\n self.normalized_scores[venue] = dict()\n for year in self.venue_scores[venue]:\n #self.standard_scores[venue][year] = round((self.venue_scores[venue][year] - year_metrics[year][0]) / year_metrics[year][1],5)\n #self.normalized_scores[venue][year] = (self.venue_scores[venue][year] - year_metrics[year][1]) / (year_metrics[year][0] - year_metrics[year][1]) + eps\n self.normalized_scores[venue][year] = (self.venue_scores[venue][year] - year_metrics[year][1] + self.epsilon) / (year_metrics[year][0] - year_metrics[year][1] + self.epsilon)", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def normalizeData(meanAndStd, dataset):\n\n for i in range(len(dataset)):\n for j in range(len(dataset[i])-1):\n mean = meanAndStd[j][\"mean\"]\n std = meanAndStd[j][\"std\"]\n dataset[i][j] = (dataset[i][j] - mean)/std", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def normalize(cls, raw_score):\n assert cls.min_value == 0.0\n return super().normalize(raw_score)", "def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True", "def normalize_data(self, df):\n result = df.copy()\n for feature_name in self.continuous_feature_names:\n max_value = self.train_df[feature_name].max()\n min_value = self.train_df[feature_name].min()\n result[feature_name] = (\n df[feature_name] - min_value) / (max_value - min_value)\n return result", "def modified_zscore(col):\n col = col.dropna()\n med_col = col.median()\n med_abs_dev = MAD(col)\n mod_z = 0.6745*((col- med_col)/med_abs_dev)\n return np.abs(mod_z)", "def update_zscore(stats_on_target, lastest_mustd):\n target_xss, target_xs, target_xct = stats_on_target\n xmu, xstd = lastest_mustd\n\n zss = (target_xss - 2 * target_xs * xmu + target_xct * xmu ** 2) / xstd ** 2\n zs = (target_xs - target_xct * xmu) / xstd\n zct = target_xct\n\n return zss, zs, zct", "def 
standardize_data(data):\n return (data - np.mean(data, axis=0)) / (np.std(data, axis=0) + 10 ** -16)", "def zscore_dataframe(genes_by_sample_df):\n zscore_df = (genes_by_sample_df.sub(genes_by_sample_df.mean(axis=1), axis=0)).truediv(\n np.maximum(genes_by_sample_df.std(axis=1), 1e-12), axis=0)\n return zscore_df", "def normalize(self, X):\n return X - X.mean()", "def normalize(self):\n self._data /= self.norm()", "def test_compute_unnormalized_scores(self):\n # todo: implement this test!\n pass", "def lz (inlist, score):\r\n z = (score-mean(inlist))/samplestdev(inlist)\r\n return z", "def normalization_test(x_test, meanV, stdV): \n eps = np.finfo(float).eps \n x_test_post = (x_test - meanV)/(stdV + eps) \n \n return x_test_post", "def transform(self, data):\n data -= self.mean\n if 0.0 in self.std:\n self.std = np.where(self.std == 0.0, 1.0, self.std)\n data /= self.std\n return data", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def normalize(data):\n # normalize data and return\n # https://stackoverflow.com/questions/29661574/normalize-numpy-array-columns-in-python\n return (data - data.min(axis=0)) / data.ptp(axis=0)", "def compute_zscore(input_data, comp_name):\n try:\n input_data = np.array(input_data[comp_name])\n except KeyError:\n print(\"The company is not included in our database\")\n return False\n # Initialize the z-score list\n zscore = []\n # For each piece of data, compute its z-score\n for i in range(len(input_data)):\n daily_zscore = (input_data[i] - np.mean(input_data))/np.std(input_data)\n zscore.append(daily_zscore)\n return zscore[0]", "def normalize(self, df):\n return (df - df.mean()) / (df.max() - df.min())", "def normalize(values):\n return (values - np.mean(values)) / np.std(values)", "def normalize_test_6(self):\n\n res = self.XYZ_factor_n.normalize([self.Y, self.Z])\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/10, 1/10, 2/10, 2/10, 3/10, 3/10, 4/10, 4/10])", "def standardize(self, inputData):\n\n return (inputData - self.mean) / self.std", "def zscore_by_group(X, labels, group):\n assert(X.shape[0] == len(labels))\n idx = np.where(labels == group)[0]\n X_group_mean = np.mean(X.loc[idx], axis=0)\n X_group_std = np.std(X.loc[idx], axis=0)\n return((X - X_group_mean) / X_group_std)", "def normalize2(data):\n return old_div(data,np.max([np.max(data),-1.0*np.min(data)]))", "def normalize(score, alpha=15):\n norm_score = score / math.sqrt((score * score) + alpha)\n if norm_score < -1.0:\n return -1.0\n elif norm_score > 1.0:\n return 1.0\n else:\n return norm_score", "def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in 
range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize_test_6(self):\n\n res = self.XYZ_factor_n.normalize([self.X, self.Z])\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/8, 1/12, 2/8, 2/12, 3/8, 3/12, 4/8, 4/12])", "def standardize_data(Xtrain,Xtest):\n \n ### Import modulates\n import numpy as np\n\n Xmean = np.nanmean(Xtrain,axis=0)\n Xstd = np.nanstd(Xtrain,axis=0)\n Xtest = (Xtest - Xmean)/Xstd\n Xtrain = (Xtrain - Xmean)/Xstd\n \n stdVals = (Xmean,Xstd)\n stdVals = stdVals[:]\n \n return Xtrain,Xtest,stdVals", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def normaliseScores(essays):\n normalised_scores = []\n for index, essay in essays.iterrows():\n score = essay['domain1_score']\n # essay_set refers to the prompt(topic) of the essay\n essay_set = essay['essay_set']\n # Perform min-max normalization on the scores to get range in [0-1]\n normalised_score = (score - MIN_SCORES[essay_set]) / (MAX_SCORES[essay_set] - MIN_SCORES[essay_set])\n normalised_scores.append(normalised_score)\n return np.array(normalised_scores)", "def normalize(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_NORM) )\n\n ret_df = df.copy()\n t = ret_df[comm_keys]\n ret_df[comm_keys] = (t - t.mean()) / t.std()\n\n return ret_df", "def set_normalization(self, dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def normalize(self, attr_name): # DONE\n self.data[attr_name] = (self.data[attr_name] - self.data[attr_name].mean()) / self.data[attr_name].std()", "def norm_data(data):\n return (data-np.min(data))/(np.max(data)-np.min(data))", "def standardize(X):\n mu = X.mean(axis=0, keepdims=True)\n s = X.std(axis=0, keepdims=True)\n return (X-mu)/s", "def standardize(X):\n X_std = X\n mean = X.mean(axis=0)\n std = X.std(axis=0)\n for col in range(np.shape(X)[1]):\n if std[col]:\n X_std[:, col] = (X_std[:, col] - mean[col]) / std[col]\n # X_std = (X - X.mean(axis=0)) / X.std(axis=0)\n return X_std", "def normalize_test_6(self):\n\n res = self.XYZ_factor_n.normalize(self.Z)\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/4, 1/4, 2/6, 2/6, 3/4, 3/4, 4/6, 4/6])", "def 
normalize_feature(df):\n return df.apply(lambda column: (column - column.mean()) / column.std())", "def to_norm(data):\n print('The dtgeostats.utils.to_norm function is under construction - use with caution...')\n mu = np.mean(data)\n sd = np.std(data)\n z = (data - mu) / sd\n bins = len(z)\n\n # Get cumulative probability and normal-score values\n counts, bin_edges = np.histogram(z, bins=bins, normed=True)\n cprob = np.cumsum(counts)/sum(counts)*0.99 # = f[:, 1] or inv[:, 0]\n nscore_value = (bin_edges[:-1] + bin_edges[1:]) / 2 # = f[:, 0] or inv[:, 1]\n\n # Apply to data\n z = st.norm(0, 1).ppf(cprob)\n z = np.where(z == np.inf, np.nan, z)\n z = np.where(np.isnan(z), np.nanmax(z), z)\n return z, cprob, nscore_value", "def normalize_test_7(self):\n\n res = self.XYZ_factor_n.normalize([self.X, self.Y, self.Z])\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/20, 1/20, 2/20, 2/20, 3/20, 3/20, 4/20, 4/20])\n\n res = self.XYZ_factor_n.normalize()\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/20, 1/20, 2/20, 2/20, 3/20, 3/20, 4/20, 4/20])", "def normalise(self):\n fitness_sum = np.sum(self.fitness)\n for i in range(self.loops):\n self.normalised_fitness[i] = self.fitness[i] / fitness_sum", "def standardize(X):\n\n scaler = StandardScaler()\n X_scaled = scaler.fit_transform(X)\n return X_scaled", "def scalarNormalizer(df):\r\n arr=dict()\r\n for col in CONT_FEATURES_COL_TO_USE:\r\n mean, std =df[col].mean(), df[col].std()\r\n df[col]=df[col].apply(lambda x: (x-mean)/std)\r\n arr[col] = [mean, std]\r\n json.dump(arr, open('normalize.json', 'w'))\r\n return df", "def normalize(self):\n total = self.total()\n for x in self.d:\n self.d[x] /= total\n return total", "def _normalize_scores(scores):\n # sort scores in an ascending order\n sorted_scores,sorted_idx = scores.view(-1).sort(descending=False)\n # compute cumulative sum\n scores_cumsum_temp = sorted_scores.cumsum(dim=0)\n scores_cumsum = torch.zeros(scores_cumsum_temp.shape,device=scores.device)\n scores_cumsum[1:] = scores_cumsum_temp[:len(scores_cumsum_temp)-1]\n # normalize by cumulative sum\n sorted_scores /= (scores.sum() - scores_cumsum)\n # tidy up and output\n new_scores = torch.zeros(scores_cumsum.shape,device=scores.device)\n new_scores[sorted_idx] = sorted_scores\n \n return new_scores.view(scores.shape)", "def zscore(a, population=None):\n x = np.asarray(a)\n if population is None:\n mu, sigma = x.mean(), x.std()\n else:\n if type(population) in (tuple, list) and len(population) == 2:\n mu, sigma = population\n elif type(population) is np.ndarray:\n mu, sigma = population.mean(), population.std()\n else:\n raise ValueError, 'population must be (mu, sigma) tuple or array'\n Z = (x - mu) / sigma\n if Z.size < 2:\n Z = float(np.squeeze(Z))\n return Z", "def normalize_data(df):\r\n return df/df.ix[0,:]", "def normalize_features(X):\n std = X.std(axis=0)\n std = np.where(std == 0, 1, std) # to avoid division by zero\n x_normed = (X - X.mean(axis=0)) / std\n return x_normed", "def normalize_weight(self, Z):\n self.weight /= Z", "def _transform(self, data):\r\n mean, variance = self._input_statistics.overall_feature_moments\r\n return (data - mean) / variance", "def compute_z_score(stats, columns, col_name):\n if stats[col_name]['data_type'] != DATA_TYPES.NUMERIC:\n return {}\n\n z_scores = list(map(abs,(st.zscore(columns[col_name]))))\n threshold = 3\n z_score_outlier_indexes = [i for i in range(len(z_scores)) if z_scores[i] > threshold]\n data = {\n 'z_score_outliers': 
z_score_outlier_indexes\n ,'mean_z_score': round(10 * (1 - np.mean(z_scores)))\n ,'z_test_based_outlier_score': round(10 * (1 - len(z_score_outlier_indexes)/len(columns[col_name])))\n ,'z_test_based_outlier_score_description':\"\"\"\n This score indicates the amount of data that are 3 STDs or more away from the mean. That is to say, the amount of data that we consider to be an outlir. A hgih z socre means your data contains a large amount of outliers.\n \"\"\"\n }\n return data", "def normalize_standard_deviation(dataset):\n return dataset*(1/np.std(dataset))", "def scale_data(self, data):\n return (data - self.mean)/self.std" ]
[ "0.78604925", "0.7845451", "0.7444941", "0.72904104", "0.70869136", "0.70312285", "0.69701195", "0.6964201", "0.69579893", "0.688568", "0.6840598", "0.67974013", "0.6757654", "0.6757654", "0.67462033", "0.6714735", "0.6691225", "0.66248816", "0.6613352", "0.65668243", "0.65464634", "0.65410167", "0.650756", "0.6499668", "0.6465587", "0.64531845", "0.63961124", "0.63877606", "0.63705176", "0.6361901", "0.63610077", "0.63610077", "0.63610077", "0.63610077", "0.63330805", "0.633023", "0.6327046", "0.6321701", "0.6310224", "0.62968653", "0.62790805", "0.62756294", "0.62637764", "0.6247968", "0.623173", "0.6227447", "0.6186385", "0.6183267", "0.6167727", "0.6163236", "0.6159784", "0.6141194", "0.61356515", "0.61109567", "0.6110861", "0.61001104", "0.6099103", "0.60951763", "0.6091274", "0.6077198", "0.60663515", "0.6062997", "0.60621154", "0.6056158", "0.6048928", "0.6036964", "0.5999715", "0.5995186", "0.5983156", "0.5974766", "0.59648544", "0.59648544", "0.595983", "0.5956394", "0.59443545", "0.59117186", "0.5909469", "0.5898664", "0.58917236", "0.58917236", "0.5890674", "0.5879649", "0.5879179", "0.58790016", "0.58685225", "0.58567584", "0.5849993", "0.58495283", "0.5848166", "0.58475673", "0.58471364", "0.58344203", "0.58296424", "0.5805689", "0.5801822", "0.57944304", "0.5792139", "0.5786672", "0.5781702", "0.57809305", "0.5779572" ]
0.0
-1
Find the difference in unique counts of two distributions and return as percentage
def compute_unique_count_drift(df_prob, ref_df_prob):
    df_diff = set(df_prob.keys()) - set(ref_df_prob.keys())
    ref_df_diff = set(ref_df_prob.keys()) - set(df_prob.keys())
    return sum([df_prob[k] for k in df_diff] + [ref_df_prob[k] for k in ref_df_diff])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _distr_stat(col1, col2, f):\n bin_threshold = 10\n vcs1, col1_len = col1.value_counts().to_dict(), float(len(col1))\n vcs1[\"_Empty_\"] = sum(col1.map(lambda x: is_null_flag(x)))\n vcs2, col2_len = col2.value_counts().to_dict(), float(len(col2))\n vcs2[\"_Empty_\"] = sum(col2.map(lambda x: is_null_flag(x)))\n values = set.union(set(vcs1.keys()), set(vcs2.keys()))\n stat = 0\n if len(values) <= bin_threshold:\n for v in values:\n v_share1 = (vcs1.get(v, 0)+1)/col1_len\n v_share2 = (vcs2.get(v, 0)+1)/col2_len\n stat += f(v_share1, v_share2)\n else:\n null_share1 = (vcs1.pop(\"_Empty_\")+1)/col1_len\n null_share2 = (vcs2.pop(\"_Empty_\")+1)/col2_len\n if null_share1 >= 1 or null_share2 >= 1:\n stat += f(null_share1, null_share2)\n elif _is_number_list(vcs1.keys()) and _is_number_list(vcs2.keys()):\n bins1, bins2 = _bin_numbers(col1, col2, bin_threshold)\n for v in range(bin_threshold):\n v_share1 = (bins1.get(v, 0)+1)/col1_len\n v_share2 = (bins2.get(v, 0)+1)/col2_len\n stat += f(v_share1, v_share2)\n stat += f(null_share1, null_share2)\n else:\n bins1, bins2 = _bin_char(col1, col2, bin_threshold)\n bin_char_threshold = max(len(bins1.keys()), len(bins2.keys()))\n for v in range(bin_char_threshold):\n v_share1 = (bins1.get(v, 0)+1)/col1_len\n v_share2 = (bins2.get(v, 0)+1)/col2_len\n stat += f(v_share1, v_share2)\n stat += f(null_share1, null_share2)\n # else:\n # stat = \"Too many unique values or not numbers.\" # excepted by TypeError (round)\n print(stat)\n return round(stat, 4)", "def percent_identity(align_1, align_2):\n matches = 0\n for i in range(len(align_1)):\n if align_1[i] == align_2[i]:\n matches+= 1\n percent_identity = matches / len(align_1)\n return percent_identity", "def image_diff_percent(image_a, image_b):\n\n # if paths instead of image instances where passed in\n # load the images\n if isinstance(image_a, str):\n image_a = Image.open(image_a)\n\n if isinstance(image_b, str):\n image_b = Image.open(image_b)\n\n # first determine difference of input images\n input_images_histogram_diff = image_diff(image_a, image_b)\n\n # to get the worst possible difference use a black and a white image\n # of the same size and diff them\n\n black_reference_image = Image.new('RGB', image_a.size, (0, 0, 0))\n white_reference_image = Image.new('RGB', image_a.size, (255, 255, 255))\n\n worst_bw_diff = image_diff(black_reference_image, white_reference_image)\n\n percentage_histogram_diff = (input_images_histogram_diff / float(worst_bw_diff)) * 100\n\n return percentage_histogram_diff", "def contains_percentage_of(self, other: 'Interval') -> float:\n if other.length == 0:\n return other.a in self\n intersection = Interval.intersection([self, other])\n return intersection.length / other.length if intersection else 0.0", "def pct_match(self, s1, s2, comp_length):\n\n matches = self.max_freq[s1:s1+comp_length] \\\n == self.max_freq[s2:s2+comp_length]\n return np.ma.sum(matches) / np.ma.count(matches)", "def KL_divergence(value_counts1, value_counts2):\n divergence = 0\n s1 = sum([value_counts1[value] for value in value_counts1])\n s2 = sum([value_counts2[value] for value in value_counts2])\n for value in set(value_counts1).union(value_counts2):\n assert(value in value_counts1 or value in value_counts2)\n if value not in value_counts1:\n s1 += KL_SMOOTHING\n if value not in value_counts2:\n s2 += KL_SMOOTHING\n for value in set(value_counts1).union(value_counts2):\n v1 = v2 = KL_SMOOTHING\n if value in value_counts1:\n v1 = value_counts1[value]\n if value in value_counts2:\n v2 = 
value_counts2[value]\n v1 = float(v1) / s1\n v2 = float(v2) / s2\n divergence += v1 * math.log(v1 / v2)\n if divergence > math.e:\n divergence = math.e\n return divergence", "def js_divergence(dist1, dist2):\n mean_dist = (dist1 + dist2) / 2.0\n js = (\n scipy.stats.entropy(dist1, mean_dist) + scipy.stats.entropy(dist2, mean_dist)\n ) / 2.0\n return js", "def match_percentage(image1_pixels, image2_pixels):\n\n match, total = 0, 0\n for i in range(len(image1_pixels)):\n if image1_pixels[i] == image2_pixels[i]:\n match += 1\n total += 1\n else:\n total += 1\n return float(match) / float(total)", "def percentage(a, b):\n return (a * 100.0) / b", "def _compare_cont_hist(b1, b2, h1, h2):\n\n b1 = copy.deepcopy(b1)\n h1 = copy.deepcopy(h1)\n b2 = copy.deepcopy(b2)\n h2 = copy.deepcopy(h2)\n\n bd1 = [float(x) for x in b1]\n bd2 = [float(x) for x in b2]\n\n inf = float('inf')\n\n if bd1[0] == -inf:\n del bd1[0]\n del h1[0]\n if bd1[-1] == inf:\n del bd1[-1]\n del h1[-1]\n if bd2[0] == -inf:\n del bd2[0]\n del h2[0]\n if bd2[-1] == inf:\n del bd2[-1]\n del h2[-1]\n\n cbe = sorted(list(set(bd1) | set(bd2)))\n\n total = len(cbe)\n\n curr1 = 0\n curr2 = 0\n init = False\n rmse = 0.0\n\n if sum(h1) == 0 or sum(h2) == 0:\n return 0\n\n for index in range(total):\n if init is False:\n init = True\n prev1 = 0\n prev2 = 0\n else:\n if (curr1 > prev1 and curr1 < len(bd1)):\n sh1 = float(h1[prev1] * (cbe[index] - cbe[index - 1])) / (bd1[curr1] - bd1[prev1])\n else:\n sh1 = 0.0\n if (curr2 > prev2 and curr2 < len(bd2)):\n sh2 = float(h2[prev2] * (cbe[index] - cbe[index - 1])) / (bd2[curr2] - bd2[prev2])\n else:\n sh2 = 0.0\n\n if math.isnan(sh1) is False and math.isnan(sh2) is False:\n sh1 = sh1 / sum(h1)\n sh2 = sh2 / sum(h2)\n rmse += ((sh1 - sh2) ** 2)\n\n if (curr1 < len(bd1) and bd1[curr1] <= cbe[index]):\n prev1 = curr1\n curr1 += 1\n if (curr2 < len(bd2) and bd2[curr2] <= cbe[index]):\n prev2 = curr2\n curr2 += 1\n\n rmse = (rmse) ** 0.5\n\n print(\"Cont: rmse score: {}\".format(rmse))\n return rmse", "def compute_fraction_in_output(siatechf_output, other_output):\n\n count = 0\n for tec in other_output:\n if tec in siatechf_output:\n count += 1\n\n return count / len(other_output)", "def percent_overlap(items1, items2, k = None):\n if k is None:\n k = max([len(items1), len(items2)])\n assert k > 0 and k <= max([len(items1), len(items2)]), 'k is out of bounds!'\n items1_set, items2_set = set(items1[:k]), set(items2[:k])\n return len(items1_set & items2_set) / len(items1_set | items2_set)", "def probability(s, a, b):\r\n return s.cdf(b) - s.cdf(a)", "def _compare_cat_hist(b1, b2, h1, h2):\n cbe = list(set(b1) | set(b2))\n\n total = len(cbe)\n rmse = 0.0\n\n if sum(h1) == 0 or sum(h2) == 0:\n return 0.0\n\n for index in range(total):\n sh1 = 0.0\n sh2 = 0.0\n try:\n sh1 = float(h1[b1.index(cbe[index])])\n except Exception as e:\n sh1 = 0.0\n try:\n sh2 = float(h2[b2.index(cbe[index])])\n except Exception as e:\n sh2 = 0.0\n\n sh1 = sh1 / sum(h1)\n sh2 = sh2 / sum(h2)\n rmse += ((sh1 - sh2) ** 2)\n\n rmse = (rmse) ** 0.5\n print(\"Cat: rmse score: {}\".format(rmse))\n return rmse", "def _class_distribution(y):\n unique, counts = np.unique(y, return_counts = True)\n\n percentages = counts / np.sum(counts)\n\n return unique, counts, percentages", "def calculate_percent_match(primers,\n seq_count,\n exclude_seq_count=1):\n # Calculate percent of sequences that are 'hit' by each primer\n for n in range(len(primers)):\n # Calculate percent perfect match\n 
primers[n].percent_match=float(primers[n].match_count/seq_count)\n primers[n].non_specific_percent=\\\n float(primers[n].non_specific_hits/exclude_seq_count)\n \n return primers", "def supervised_count_feature(s1,s0):\n a1=s1.groupby(s1).count()\n a0=s0.groupby(s0).count()\n b0,b1=a0.align(a1)\n c1=b1.fillna(0).rename('c1')\n c0=b0.fillna(0).rename('c0')\n ss=((c1-c0)/(c1+c0)).rename('diff_percentage')\n tt=pd.concat([ss,c1,c0],axis=1)\n return tt", "def calculate_distribution_distance(freqs1, freqs2):\n A = np.array([freqs1, freqs2])\n p_value = calculate_chi_square_p_value(A)\n return 1 - p_value", "def correspondences(labels1,labels2,return_counts=True):\n q = 100000\n assert amin(labels1)>=0 and amin(labels2)>=0\n assert amax(labels2)<q\n combo = labels1*q+labels2\n result = unique(combo, return_counts=return_counts)\n if return_counts:\n result, counts = result\n result = array([result//q,result%q,counts])\n else:\n result = array([result//q,result%q])\n return result", "def dices(x,y):\r\n import sets\r\n x = sets.Set(x)\r\n y = sets.Set(y)\r\n common = len(x.intersection(y))\r\n total = float(len(x) + len(y))\r\n return 2*common/total", "def similarity_two_images_color(img1: np.ndarray, img2: np.ndarray) -> np.ndarray:\n hist_image_1 = histogram_of_image_color(img1, HIST_BINS_INTENSITY, BIN_DIFFERENCE_INTENSITY_HALF)\n hist_image_2 = histogram_of_image_color(img2, HIST_BINS_INTENSITY, BIN_DIFFERENCE_INTENSITY_HALF)\n max_difference = max(2 * np.sum(hist_image_1), 2 * np.sum(hist_image_2))\n return 100 - 100 * np.sum(np.absolute(hist_image_1 - hist_image_2)) / max_difference", "def metric_value(x, y):\n return (sum(np.all(x == y, axis=1) * 1) / len(x)) * 100", "def dominance(counts):\n freqs = counts/float(counts.sum())\n return (freqs*freqs).sum()", "def _ratio(a1, a2):\n abs_residues = np.abs(a1 - a2).sum()\n avg_abs_sum = 0.5 * np.abs(a1).sum() + 0.5 * np.abs(a2).sum()\n return abs_residues / avg_abs_sum", "def baseline_score(self,t0,t1):\n return len(set(t0) & set(t1))/len(set(t0).union(set(t1)))", "def dist(a, b):\n return np.sum((a-b)**2.0)**.5", "def get_identical_score(bin1,bin2=None):\n if bin2==None: bin2=[]\n tmpscore=0.0\n norm=0\n for ali1 in bin1:\n tmpscore+=get_subscore(ali1,ali1)\n norm+=1\n for ali2 in bin2:\n tmpscore+=get_subscore(ali2,ali2)\n norm+=1\n return tmpscore/norm", "def find_pcts(p1, p2, start_b = [], iter = 10000):\n win_record = []\n for i in range(iter):\n deck = Deck()\n need = 5 - len(start_b)\n b2 = draw_sure(deck, need, p1+p2+start_b)\n win_record.append(_who_wins(start_b + b2, p1, p2, printout = False))\n return [win_record.count(1) / float(len(win_record)), \n win_record.count(2) / float(len(win_record))\n ]", "def cng_dissimilarity(self, other):\n dissimilarity = 0.0\n for ngram in set(self) | set(other):\n dissimilarity += (2 * (self[ngram] - other[ngram]) /\n (self[ngram] + other[ngram])) ** 2\n return dissimilarity", "def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n \n for i in d1:\n total = total + d1[i]\n for i in d2:\n if i in d1:\n if total == 0:\n score = score\n else:\n probablility = (d1[i] / total)\n score = score + (math.log10(probablility) * d2[i])\n else:\n if total == 0:\n score = score\n else:\n score = score + ((0.5 / total) * d2[i])\n return score", "def mcintosh_d(counts):\n u = sqrt((counts*counts).sum())\n n = counts.sum()\n return (n-u)/(n-sqrt(n))", "def percentOverlap(x1, x2):\n nonZeroX1 = 
np.count_nonzero(x1)\n nonZeroX2 = np.count_nonzero(x2)\n minX1X2 = min(nonZeroX1, nonZeroX2)\n percentOverlap = 0\n if minX1X2 > 0:\n percentOverlap = float(np.dot(x1.T, x2)) / float(minX1X2)\n return percentOverlap", "def make_shares_comparison(glass, ch, variable):\n out = (\n pd.concat(\n [df[[variable]].value_counts(normalize=True) for df in [glass, ch]], axis=1\n )\n .rename(columns={0: \"glass\", 1: \"companies\"})\n .assign(share_norm=lambda x: (x[\"glass\"] / x[\"companies\"]) - 1)\n )\n return out", "def derive_count(freq1: typing.List[int], freq2: typing.List[int]) -> int:\n count = 0\n for i in range(26):\n count += min(freq1[i], freq2[i])\n return count", "def _ratio(counter1, counter2, smoothing, min_freq=0):\n # Ratios dict to be returned\n ratios = {}\n \n # Set up for smoothing\n # The number of outcomes is the number of unique words across the two wordlists\n all_words = set(counter1)\n all_words.update(counter2)\n n_outcomes = len(all_words)\n # Total tokens per wordlist\n counter1_obs = sum(counter1.values())\n counter2_obs = sum(counter2.values())\n\n # First loop over counter1. You could conceiveably combine these loops into one\n # that first runs on counter1 then counter2, but then you'd have to keep track\n # of which one is the numerator, which makes for more work\n for item, count1 in counter1.items():\n # Filter below min_freq\n if count1 < min_freq:\n continue\n\n # Compute ratio\n count2 = counter2[item]\n ratios[item] = _logprobratio(_lidstone_smooth(count1, smoothing, counter1_obs, n_outcomes),\n _lidstone_smooth(count2, smoothing, counter2_obs, n_outcomes))\n\n # Rinse and repeat for counter2\n for item, count2 in counter2.items():\n # Skip items already counted and filter below min_freq\n if count2 < min_freq or item in ratios:\n continue\n\n # Compute ratio\n count1 = counter1[item]\n ratios[item] = _logprobratio(_lidstone_smooth(count1, smoothing, counter1_obs, n_outcomes),\n _lidstone_smooth(count2, smoothing, counter2_obs, n_outcomes))\n\n return ratios", "def get_dists_2():\n d1 = Distribution(['0', '1'], [1 / 2, 1 / 2])\n d2 = Distribution(['0', '1'], [1 / 3, 2 / 3])\n d3 = Distribution(['0', '1'], [2 / 5, 3 / 5])\n return d1, d2, d3", "def count_unique_percent(df):\n #flatten DataFrame to one dimensional array and convert it to Series object\n series = pd.Series(df.as_matrix().reshape(-1))\n #count unique values percentage\n series.value_counts()\n unique_counts_pct = series.value_counts(normalize=True)\n return unique_counts_pct", "def _fraction_latency(self, users_distances):\n\n users_desired_latency = np.array(list(map(lambda a: self.services_desired_latency[a],\n self.users_services)))\n check = users_distances < users_desired_latency\n fraction = np.count_nonzero(check==True) / self.num_of_users\n return fraction", "def _dist(a, b):\n return torch.pow(a - b, 2).sum(-1)", "def dist(self, one, two):\n return sum((one[0] != two[0], one[1] != two[1]))", "def getSimilarity(edges1,edges2):\n\tcount = 0\n\tfor edge in edges2:\n\t\tif (edge in edges1):\n\t\t\tcount +=1\n\treturn count / len(edges2)", "def test_get_dup_labels_perc_with_dupes(self):\r\n\r\n # half of the labels are duplicates\r\n\r\n labels = ['seq1', 'seq2', 'seq1', 'seq2']\r\n\r\n actual_perc, dups = get_dup_labels_perc(labels)\r\n\r\n expected_perc = \"%1.3f\" % 0.5\r\n\r\n self.assertEqual(actual_perc, expected_perc)\r\n\r\n expected_dups = ['seq1', 'seq2']\r\n\r\n self.assertEqual(set(dups), set(expected_dups))", "def score(stripe1, stripe2):\n scr = 0\n count = 0\n for p1, p2 in 
zip(stripe1, stripe2):\n r = abs(p1[0] - p2[0])\n g = abs(p1[1] - p2[1])\n b = abs(p1[2] - p2[2])\n scr += r + g + b\n return scr", "def fitness(im1,im2):\n\n arr1 = np.array(im1,np.int16) # Creates array of image to easily calculate the difference between pixels.\n arr2 = np.array(im2,np.int16) #np.int16 is used to change the dtype\n\n\n dif = np.sum(np.abs(arr1-arr2))\n\n return (dif/255.0 * 100)/arr1.size", "def matching_score(self,set1, set2):\n set_set1=set(set1)\n set_set2=set(set2)\n '''print(\" set_set12\")\n print(set_set1)\n print(set_set2)'''\n return len(set_set1.intersection(set_set2)) ** 2 / (float(len(set1)) * len(set2))\n #return len(set_set1.intersection(set_set2)) / len(set_set1.union(set_set2))", "def percentage_common_types (corpus): \n total = sum([t[1] for t in most_frequent(corpus)])\n return percentage(total, corpus_length(corpus))", "def _compute_x2_statistic(self, expected, actual):\n rng = expected.keys()\n if actual.keys() != rng:\n raise Exception(\"Ranges of two frequencies are not equal.\")\n num_observations = sum([actual[r] for r in rng])\n if abs(num_observations - sum([expected[r] for r in rng])) > _FLOAT_EQ_DELTA:\n raise Exception(\"Frequencies must sum to the same value.\")\n chi_squared_stat = sum([(actual[r] - expected[r])**2 / max(float(expected[r]), 1.0)\n for r in rng])\n p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, # Find the p-value\n df=len(rng))\n return chi_squared_stat, p_value", "def compare_sentences(first, second):\n if not len(first) or not len(second):\n return 0\n return len(set(only_important(first)) & set(only_important(second))) / ((len(first) + len(second)) / 2.0)", "def gridratio( grid1, grid2):\n\n nx1 = grid1.img_width\n ny1 = grid1.img_height\n nx2 = grid2.img_width\n ny2 = grid2.img_height\n\n ratio = 0.\n rms = 0.\n\n if nx1 != nx2:\n print(\"GridRatio: Nx1 != Nx2 (%d, %d)\" % (nx1, nx2))\n return ratio, rms\n\n if ny1 != ny2:\n print(\"GridRatio: Ny1 != Ny2 (%d, %d)\" % (ny1, ny2))\n return ratio, rms\n\n count = 0\n nonzero = np.zeros(nx1*ny1)\n\n # copy to ratio array\n gridratio = copy.deepcopy( grid1)\n\n for iii in range(nx1):\n for jjj in range(ny1):\n # put in zero as default\n gridratio.image[jjj,iii] = 0.\n if grid1.image[jjj,iii] > EPSILON:\n if grid2.image[jjj,iii] > EPSILON:\n nonzero[count] = grid1.image[jjj,iii]/grid2.image[jjj,iii]\n count = count + 1\n if count < 2:\n print (\"No overlap in non-zero samples\")\n return ratio, rms, gridratio\n\n nonzero = nonzero[0:count]\n asum = np.sum( nonzero)\n ratio = asum/float(count)\n rms = np.std( nonzero)\n print (\"Grid Ratio: %.4f +/- %.4f for %d samples\" % (ratio, rms/np.sqrt(count), count))\n # return the ratio grid \n return ratio, rms, gridratio", "def dist(string1, string2):\n if string1 == string2:\n return 0\n count1 = Counter(string1)\n count2 = Counter(string2)\n\n keys = set(count1.keys())\n keys.update(count2.keys())\n dist = sum(abs(count1.get(letter, 0) - count2.get(letter, 0)) for letter in keys)\n return dist", "def ds_ratio(group):\n nix_count = (group=='nix').sum()\n top_count = (group=='top').sum()\n ratio = nix_count/(nix_count+top_count) #could smooth this\n return ratio", "def compute_js_divergence(df_1, df_2, n_bins=30):\n a = np.concatenate((df_1, df_2), axis=0)\n e, p = prob_mass_fun(df_1, n = n_bins, range = (a.min(), a.max()))\n _, q = prob_mass_fun(df_2, n = e, range = (a.min(), a.max()))\n\n return scipy.spatial.distance.jensenshannon(p, q)", "def hamming_dist(gene_1, gene_2):\n ham_dist = 0\n for c1, c2 in zip(gene_1, gene_2):\n 
if c1 != c2:\n ham_dist += 1\n return ham_dist", "def get_percentage_false_class(arr_of_results):\n\n count_success = np.zeros_like(arr_of_results[:,0], dtype=float)\n count_correct_prediction = 0\n\n for i in range(len(arr_of_results[0])):\n use = True\n for result in arr_of_results[:,i]:\n if result[\"image_target\"] != result[\"prediction_image\"] or result[\"std_noise\"] == 0:\n use = False\n if use:\n count_correct_prediction += 1\n i2 = 0\n for result in arr_of_results[:,i]:\n if result[\"success\"]:\n count_success[i2] += 1\n i2 += 1\n\n\n errors = proportion_confint(count_success, count_correct_prediction)\n count_success = count_success/count_correct_prediction\n errors = np.array(errors)\n\n errors[0] = np.abs(count_success - errors[0])\n errors[1] = np.abs(count_success - errors[1])\n\n return count_success, errors", "def metric(x,y):\n sm = x + y\n df = x - y\n div = sm / df if df != 0 else 0\n return \"sum is %s \" %sm, \"difference is %s \" %df, \"division of difference to sum is %s\" %div", "def percentage(count, total):\n return count / total * 100", "def hellingsdowns_factor(pulsar1, pulsar2):\n sep = pulsar1.location.separation(pulsar2.location).to('radian')\n first = (1 - np.cos(sep))/2\n first = first * np.log(first)\n second = (1 - np.cos(sep))/2\n last = 0\n if pulsar1 == pulsar2: return 1 #last = 0.5\n return 1.5*first - 0.25 * second + 0.5", "def get_dup_labels_perc(fasta_labels):\r\n fasta_labels_count = float(len(fasta_labels))\r\n fasta_labels_derep = float(len(set(fasta_labels)))\r\n\r\n perc_dup = \"%1.3f\" %\\\r\n ((fasta_labels_count - fasta_labels_derep) / fasta_labels_count)\r\n\r\n label_counts = defaultdict(int)\r\n for curr_label in fasta_labels:\r\n label_counts[curr_label] += 1\r\n\r\n labels_from_dups = []\r\n for label in label_counts:\r\n if label_counts[label] > 1:\r\n labels_from_dups.append(label)\r\n\r\n return perc_dup, labels_from_dups", "def get_diff_and_percentage(self, first, second, state):\n difference = first - second\n per_difference = (difference / second) * 100\n total_percentage = (first / self.populations[state]) * 100\n return [difference, per_difference, total_percentage]", "def nucross(a, b):\n ev = a / np.linalg.norm(a)\n return np.linalg.norm(np.cross(ev, b))", "def compare_0_and_2(clf_name, print_dist=True):\n count0, dist0 = distribution_of_result(clf_name, all=0, print_res=False)\n count2, dist2 = distribution_of_result(clf_name, all=2, print_res=False)\n\n tp0 = list()\n for value in dist0['TP'].values():\n tp0.extend(value)\n\n tn0 = list()\n for value in dist0['TN'].values():\n tn0.extend(value)\n\n fp0 = list()\n for value in dist0['FP'].values():\n fp0.extend(value)\n\n fn0 = list()\n for value in dist0['FN'].values():\n fn0.extend(value)\n\n tp2 = list()\n for value in dist2['TP'].values():\n tp2.extend(value)\n\n tn2 = list()\n for value in dist2['TN'].values():\n tn2.extend(value)\n\n fp2 = list()\n for value in dist2['FP'].values():\n fp2.extend(value)\n\n fn2 = list()\n for value in dist2['FN'].values():\n fn2.extend(value)\n\n tp = list()\n tp.extend(tp0)\n tp.extend(tp2)\n tn = list()\n tn.extend(tn0)\n tn.extend(tn2)\n fp = list()\n fp.extend(fp0)\n fp.extend(fp2)\n fn = list()\n fn.extend(fn0)\n fn.extend(fn2)\n\n tp_counter = Counter(tp)\n tn_counter = Counter(tn)\n fp_counter = Counter(fp)\n fn_counter = Counter(fn)\n\n if print_dist:\n print(\"TP: {}\".format(tp_counter))\n print(\"TN: {}\".format(tn_counter))\n print(\"FP: {}\".format(fp_counter))\n print(\"FN: {}\".format(fn_counter))\n\n return 
tp_counter, tn_counter, fp_counter, fn_counter", "def n_odpairs_percentage(od_num, counted_od):\n start = 0\n total = 0\n for i in counted_od:\n if start < od_num:\n total += i\n start += 1\n else:\n break\n return total/np.sum(counted_od)", "def entity_relatedness(self, a, b):\n occ_a = self.occurrences(a)\n occ_b = self.occurrences(b)\n occ_common = occ_a.intersection(occ_b)\n\n try:\n logmax = max(len(occ_a), len(occ_b))\n logmin = min(len(occ_a), len(occ_b))\n logint = len(occ_common)\n return (logmax - logint) / (self.LOGW - logmin)\n except ValueError:\n return 0.0", "def p_obj1_given_no_obj2(self, obj1, obj2):\n if obj2 in self.prior[obj1]:\n obj1_and_obj2_count = self.prior[obj1][obj2]\n else:\n obj1_and_obj2_count = 0\n\n p = (self.sums[obj1] - obj1_and_obj2_count) / float(self.total_objects - obj1_and_obj2_count)\n assert 0 <= p and p <= 1, (p, obj1, obj2)\n return p", "def robbins(counts):\n return float(singles(counts))/counts.sum()", "def similarity_two_images_hog(img1: np.ndarray, img2: np.ndarray) -> np.ndarray:\n hog_image1 = hog_of_image(img1)\n hog_image2 = hog_of_image(img2)\n\n max_difference = max(2 * sum_all_magnitudes(img1), 2 * sum_all_magnitudes(img2))\n return 100 - 100 * np.sum(np.absolute(hog_image1 - hog_image2)) / max_difference", "def percent_frequencies(self):\n word_count = 0\n local = self.frequencies()\n for key in local.keys():\n i = local[key]\n word_count += int(i)\n for key in local.keys():\n i = local[key]\n percentage = float(i) / float(word_count)\n local[key] = percentage\n return local", "def count_words(title_pair: np.array) -> float:\r\n title_1, title_2 = title_pair\r\n # Transform into sets of words\r\n title_1 = set(title_1.split())\r\n title_2 = set(title_2.split())\r\n # Divide length of intersection by length of union\r\n ratio = len(title_1.intersection(title_2)) / len(title_1.union(title_2))\r\n return ratio", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n\n for element in d1:\n total += d1[element]\n\n for item in d2:\n if item in d1:\n score += math.log(d1[item]/total) * (d2[item])\n else:\n score += math.log(0.5/total) * (d2[item])\n return score", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n for key in d1:\n total += d1[key]\n for item in d2:\n if item in d1:\n score += d2[item] * math.log(d1[item]/total)\n else:\n score += d2[item] * math.log(0.5/total)\n return score", "def response_count_percentage(this_count):\n num_targets = db.session.query(ColourTargetColBG.id).count()\n return (this_count / num_targets) * 100.0", "def distributions_EMD(d1, d2):\n return ss.wasserstein_distance(d1.get_probs(), d2.get_probs()) / len(d1.get_probs())", "def get_percent_identity(seq1, seq2, count_gaps=False):\n\n # Make sure the sequence content is a string\n seq1 = str(seq1)\n seq2 = str(seq2)\n\n # print (seq1)\n # print (seq2)\n\n matches = sum(aa1 == aa2 for aa1, aa2 in zip(seq1, seq2) if aa1 != \"-\" and aa2 != \"-\")\n\n # Set the length based on whether we want identity to count gaps or not\n # length = len(seq1) if count_gaps else min(len(seq1.replace(\"-\", \"\"))- seq2.count(\"-\"), len(seq2.replace(\"-\", \"\")) - seq1.count(\"-\"))\n if count_gaps:\n length = len(seq1)\n else:\n length = sum ([1 for (aa1, aa2) in zip(seq1, seq2) if aa1 != \"-\" and aa2 != \"-\"])\n\n # print ('matches ', matches)\n # print ('length ', length)\n\n pct_identity = 100.0 * matches / length\n\n return pct_identity", "def count_difference(patch1, patch2):\n\n\treturn np.sum(np.square(patch1 - patch2))", "def 
pairwise_diversity(calls):\n # Count up the number of reference and alternate genotypes.\n if 0 in calls:\n ref_count = calls.count(0)\n else:\n return 0\n if 1 in calls:\n alt_count = calls.count(1)\n else:\n return 0\n # This sample size will change depending on how many non-missing genotypes\n # there are.\n total_count = ref_count + alt_count\n # Calculate up the similarities based on the number of reference and\n # alternative genotypes. Calculate the number of pairwise comparisons\n # that were made.\n ref_sim = n_choose_r(ref_count, 2)\n alt_sim = n_choose_r(alt_count, 2)\n total_comp = n_choose_r(total_count, 2)\n # Then pairwise diversity is 1-[(ref_sim + alt_sim)/total_comp]\n return 1 - ((ref_sim + alt_sim) / float(total_comp))", "def get_unique_covered_percentage(fuzzer_row_covered_regions,\n fuzzer_col_covered_regions):\n\n unique_region_count = 0\n for region in fuzzer_col_covered_regions:\n if region not in fuzzer_row_covered_regions:\n unique_region_count += 1\n return unique_region_count", "def adjust_ratio(cat1,cat2, sm_sentlen, pos_unigrs):\n cat1_count = pos_unigrs.get(cat1, 0.0)\n cat2_count = pos_unigrs.get(cat2, 0.0)\n if cat1_count == 0.0 or cat2_count == 0.0:\n return 0.0\n else:\n return smooth(cat1_count, sm_sentlen) / smooth(cat2_count, sm_sentlen)", "def percent_unique_ngrams_in_train(train_ngrams_dict, gen_ngrams_dict):\n\n # *Total* number of n-grams produced by the generator.\n total_ngrams_produced = 0\n\n for _, value in gen_ngrams_dict.iteritems():\n total_ngrams_produced += value\n\n # The unique ngrams in the training set.\n unique_ngrams_in_train = 0.\n\n for key, _ in gen_ngrams_dict.iteritems():\n if key in train_ngrams_dict:\n unique_ngrams_in_train += 1\n return float(unique_ngrams_in_train) / float(total_ngrams_produced)", "def compute_similarity(x, y, metric='kl_divergence'):\n from scipy.stats import entropy, pearsonr\n # remove zeros slightly increase divergence\n x = x[x != 0]\n y = y[y != 0]\n # outer join two distributions\n eps = min(x.min(), y.min()) / 10\n xy = pd.concat([x, y], axis=1).add(eps, fill_value=0)\n x = xy.iloc[:, 0]\n y = xy.iloc[:, 1]\n if metric == 'pearson':\n score, _ = pearsonr(x, y)\n else:\n score = entropy(x, y)\n return score", "def test_get_dup_labels_perc_all_valid(self):\r\n\r\n # No duplicates\r\n\r\n labels = ['seq1', 'seq2', 'seq3', 'seq4']\r\n\r\n actual_perc, dups = get_dup_labels_perc(labels)\r\n\r\n expected_perc = \"%1.3f\" % 0.0\r\n\r\n self.assertEqual(actual_perc, expected_perc)\r\n\r\n expected_dups = []\r\n\r\n self.assertEqual(dups, expected_dups)", "def dist(gene1, gene2):\n return abs(len(gene1.goal) - len(gene2.goal))", "def get_similarity_score(dict1, dict2, dissimilarity = False):\r\n DIFF = 0\r\n for i in dict1:\r\n x = False\r\n #Boolean used to not add repeated frequencies as it will be seen later\r\n for j in dict2:\r\n if i == j:\r\n #use of == instead of i in j as for example word \"meme\" could\r\n #be in \"memes\" and would therefore cause a problem\r\n DIFF += abs(dict1[i] - dict2[j])\r\n #if the word/n-gram appears in both dictionnaires then\r\n #the absolute value of the difference between the frequencies \r\n #in each dictionnary is added to DIFF\r\n x = True\r\n if x == False:\r\n #Boolean used so that frequencies of a word/n-gram are not added again\r\n #and again to DIFF\r\n DIFF += dict1[i] \r\n for j in dict2:\r\n x = False\r\n #same use of boolean for same reasons as previou for loop\r\n for i in dict1:\r\n if i == j:\r\n #use of == due to the same reason\r\n x = True\r\n #this 
time the absolute value of the difference between the\r\n #frequencies doesn't have to be added as it already has been\r\n if x == False:\r\n DIFF += dict2[j]\r\n ALL = 0\r\n for i in dict1:\r\n ALL += dict1[i]\r\n #all the frequencies of the first dictionnary are added to ALL\r\n for j in dict2:\r\n ALL += dict2[j]\r\n #same occurs as in the previous loop but for the second dictionnary\r\n \r\n #Depending on the input of dissimilarity this will occur\r\n if dissimilarity == False:\r\n result = round(100*(1 - (DIFF/ALL)))\r\n #similarity between the dictionnaries of word/n-grams is the result\r\n else:\r\n result = round(100*(DIFF/ALL))\r\n #dissimilarity between the dictionnaries of word/n-grams is the result\r\n return result", "def purity(cl: np.ndarray, org: np.ndarray):\n assert cl.shape == org.shape\n\n acc = 0\n for label in np.unique(cl):\n labels = {}\n for node in range(len(org)):\n if cl[node] == label:\n if org[node] not in labels.keys():\n labels[org[node]] = 0\n labels[org[node]] += 1\n acc += max(labels.values()) if labels.keys() else 0\n return acc / len(org)", "def compare_dictionaries(d1, d2):\r\n score = 0\r\n gef = 0\r\n for z in d1:\r\n gef += d1[z]\r\n total = gef\r\n \r\n for x in d2:\r\n if x in d1:\r\n score += math.log(d1[x] / total) * d2[x] \r\n else:\r\n score += math.log(0.5/total) * d2[x]\r\n return score", "def find_added_utility_between_dists(\n mRS_dist1,\n mRS_dist2,\n utility_weights=[]\n ):\n if len(utility_weights) < 1:\n utility_weights = np.array(\n [0.97, 0.88, 0.74, 0.55, 0.20, -0.19, 0.00])\n\n # Combine the two mRS distributions into one ordered list:\n mRS_dist_mix = np.concatenate((mRS_dist1, mRS_dist2))\n # Sort and remove the duplicate 1.0 at the end:\n mRS_dist_mix = np.sort(mRS_dist_mix)[:-1]\n # Add a 0.0 at the start:\n mRS_dist_mix = np.concatenate(([0.0], mRS_dist_mix))\n\n # Find the size of each bin (not cumulative):\n mRS_diff_mix = np.diff(mRS_dist_mix, prepend=0.0)\n\n # Store the mRS indices in here:\n x1_list = []\n x2_list = []\n # And store the utility values in here:\n u1_list = []\n u2_list = []\n for i, boundary in enumerate(mRS_dist_mix):\n # Find which mRS bin we're currently in:\n x1 = np.digitize(boundary, mRS_dist1, right=True)\n x2 = np.digitize(boundary, mRS_dist2, right=True)\n\n # Store values:\n x1_list.append(x1)\n x2_list.append(x2)\n u1_list.append(utility_weights[x1])\n u2_list.append(utility_weights[x2])\n\n # Find the increase in utility between dists 1 and 2:\n added_utils = np.array(u1_list) - np.array(u2_list)\n\n # Weight the increases by the proportion of the mRS distribution\n # that they span:\n weighted_added_utils = np.cumsum(added_utils * mRS_diff_mix)\n\n # Round the distribution values to three decimal places\n # - might not add up to 1 afterwards, but saves apparent rounding\n # errors in the printed utility and mRS change sums.\n mRS_dist_mix = np.round(mRS_dist_mix, 3)\n\n return mRS_dist_mix, weighted_added_utils, x1_list, x2_list", "def js_metric(df_1, df_2, numerical_columns, categorical_columns):\n\n res = {}\n STEPS = 100\n\n for col in categorical_columns:\n # to ensure similar order, concat before computing probability\n col_baseline = df_1[col].to_frame()\n col_sample = df_2[col].to_frame()\n col_baseline[\"source\"] = \"baseline\"\n col_sample[\"source\"] = \"sample\"\n\n col_ = pd.concat([col_baseline, col_sample], ignore_index=True)\n\n # aggregate and convert to probability array\n arr = (\n col_.groupby([col, \"source\"])\n .size()\n .to_frame()\n .reset_index()\n .pivot(index=col, 
columns=\"source\")\n .droplevel(0, axis=1)\n )\n arr_ = arr.div(arr.sum(axis=0), axis=1)\n arr_.fillna(0, inplace=True)\n\n # calculate js distance\n js_distance = jensenshannon(\n arr_[\"baseline\"].to_numpy(), arr_[\"sample\"].to_numpy()\n )\n\n res.update({col: js_distance})\n\n for col in numerical_columns:\n # fit gaussian_kde\n col_baseline = df_1[col]\n col_sample = df_2[col]\n kde_baseline = gaussian_kde(col_baseline)\n kde_sample = gaussian_kde(col_sample)\n\n # get range of values\n min_ = min(col_baseline.min(), col_sample.min())\n max_ = max(col_baseline.max(), col_sample.max())\n range_ = np.linspace(start=min_, stop=max_, num=STEPS)\n\n # sample range from KDE\n arr_baseline_ = kde_baseline(range_)\n arr_sample_ = kde_sample(range_)\n\n arr_baseline = arr_baseline_ / np.sum(arr_baseline_)\n arr_sample = arr_sample_ / np.sum(arr_sample_)\n\n # calculate js distance\n js_distance = jensenshannon(arr_baseline, arr_sample)\n\n res.update({col: js_distance})\n\n list_output = sorted(res.items(), key=lambda x: x[1], reverse=True)\n dict_output = dict(list_output)\n\n return dict_output", "def prob_larger_continuous(distr1, distr2):\n\n return distr1.expect(distr2.cdf)", "def compare_popularity(self, a, b):\n a_score = a['stats']['attending'] + a['stats']['maybe'] / 2.0\n b_score = b['stats']['attending'] + b['stats']['maybe'] / 2.0\n if a_score < b_score:\n return -1\n elif a_score > b_score:\n return 1\n else:\n return 0", "def test_valid_reported_frequency_distribution(self):\n s = private_sampling.PrivateThresholdSampleWithFrequencies(\n threshold=0.5, eps=0.1, delta=0.5**20)\n freq_dists = [\n s.compute_reported_frequency_dist(i) for i in range(100, 1001, 100)\n ]\n for dist in freq_dists:\n self.assertAlmostEqual(sum(dist.values()), 1.0)\n for x in dist.values():\n self.assertGreaterEqual(x, 0.0)", "def dist(a,b):\n dist = 0.\n for i in range(len(a)):\n dist += (b[i]-a[i])**2.\n\n dist = dist**.5\n return dist", "def d_nucross(a, b):\n ev = a / np.linalg.norm(a)\n return np.dot(d_unit_vector(a), d_ncross(ev, b))", "def update_fractional_counts(self,alignments,a_prob,t_prob,sentence_alignments):\n r = t_prob.shape[0]\n c = t_prob.shape[1]\n for i in range(r):\n for j in range(c):\n a_sum = 0\n n_sum = 0\n # Check presence of tuple in each aligment\n for a in range(len(alignments)):\n alignment = alignments[a]\n #Step 1 -fractional count of (e,f) counts in all alignments\n if (i,j) in alignment:\n a_sum += a_prob[a]\n #Step 2 -count of f in (e,f) in all alignments and normalize the fractional counts\n if j in [x[1] for x in alignment]:\n n_sum += a_prob[a]\n t_prob[i][j] = a_sum/n_sum", "def _histogram_intersection_distance(a, b):\n # branching version\n #return np.vstack((a, b)).min(axis=0).sum()\n\n # Non-branching version\n # noinspection PyUnresolvedReferences\n return (a + b - np.abs(a - b)).sum() * 0.5", "def ratio_func(a, b):\n return a / b", "def calc_stats(hits, misses):\n try:\n result = (float(misses) / float(hits)) * 100.0\n except ZeroDivisionError:\n if misses == 0:\n result = 0.0\n else:\n result = 100.0\n return result", "def compare(predictions, truth):\n comp = predictions - truth\n return 1 - (np.count_nonzero(comp) / len(predictions))", "def compare_iterables(iterable_a, iterable_b):\n\n similarity = 0\n difference = 0\n\n min_length = min(len(iterable_a), len(iterable_b))\n\n for i in range(0, min_length):\n if iterable_a[i] == iterable_b[i]:\n similarity += 1\n else:\n difference += 1\n\n ratio = similarity / (similarity+difference)\n return ratio", "def 
compare_trees(first_soup: HTMLStrip, second_soup: HTMLStrip) -> float:\n first_tree = Tree.from_soup_object(first_soup.file_name, first_soup.original_soup)\n second_tree = Tree.from_soup_object(second_soup.file_name, second_soup.original_soup)\n\n common_paths_size: int = first_tree.num_of_common_paths(second_tree)\n target_size: int = second_tree.total_num_of_paths()\n similarity = float(common_paths_size)*100/target_size\n print(f'{similarity:.2f}')\n return similarity", "def KolmogorovSmirnoff_statistics(dd1, dd2):\n cum1 = dd1.cumulative_distribution()\n cum2 = dd2.cumulative_distribution()\n minimum = max(cum1[0][0], cum2[0][0])\n maximum = max(cum1[-1][0], cum2[-1][0])\n index1 = len(cum1) - 1\n index2 = len(cum2) - 1\n summa1 = summa2 = 0\n\n difference = 0\n for i in reversed(range(minimum, maximum+1)):\n if cum1[index1][0] == i:\n summa1 = cum1[index1][1]\n index1 -= 1\n if cum2[index2][0] == i:\n summa2 = cum2[index2][1]\n index2 -= 1\n if abs(summa1 - summa2) > difference:\n difference = abs(summa1 - summa2)\n return difference" ]
[ "0.6724862", "0.6445435", "0.6334011", "0.6307524", "0.62770367", "0.6188019", "0.61835897", "0.6182407", "0.6136989", "0.60972196", "0.60946876", "0.6093589", "0.60753244", "0.6063225", "0.60029024", "0.5996274", "0.5993236", "0.5984397", "0.5978963", "0.59674275", "0.5963827", "0.59345955", "0.59183824", "0.5900122", "0.5884056", "0.58790845", "0.5879081", "0.58747566", "0.5870184", "0.58649904", "0.5859287", "0.58586556", "0.5845906", "0.5826268", "0.58262676", "0.58242697", "0.58122915", "0.5795361", "0.57953024", "0.5779847", "0.5778561", "0.5777415", "0.5750701", "0.5744285", "0.57431006", "0.57385194", "0.57170564", "0.5713362", "0.570976", "0.5708244", "0.57010025", "0.569519", "0.5686026", "0.5679558", "0.56750166", "0.5669792", "0.5654031", "0.5652581", "0.56501514", "0.56498796", "0.5642253", "0.5639422", "0.5639246", "0.5638574", "0.56344587", "0.5633646", "0.5629498", "0.5626746", "0.5616362", "0.5614748", "0.5608394", "0.560365", "0.558753", "0.5584051", "0.55787385", "0.557785", "0.5576504", "0.55672574", "0.5563406", "0.55631256", "0.5561615", "0.5555086", "0.5554737", "0.5552711", "0.5548973", "0.55367774", "0.5534155", "0.55080193", "0.55058545", "0.5494018", "0.54930663", "0.5483606", "0.54820704", "0.5480371", "0.54757446", "0.5474928", "0.5472775", "0.54725796", "0.5472432", "0.5472346" ]
0.62579167
5
Compute drift score as the percentage of overlapping probabilities
def compute_drift_score(ref_col_prob, col_prob): return sum(abs(np.asarray(ref_col_prob) - np.array(col_prob)) * 100)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate(self):\n\n gt = self.ground_truth.flatten().astype(np.int8)\n seg = self.segmentation.flatten().astype(np.int8)\n\n probability_difference = np.absolute(gt - seg).sum()\n probability_joint = (gt * seg).sum()\n\n if probability_joint != 0:\n return probability_difference / (2. * probability_joint)\n else:\n return -1", "def calc_match_probability(obs, pred1):\n \n # Throw away any non-atom columns\n obs_reduced = obs.loc[:, self.pars[\"atom_set\"].\n intersection(obs.columns)]\n pred1_reduced = pred1.loc[self.pars[\"atom_set\"].\n intersection(pred1.index)]\n \n # Calculate shift differences for each observed spin system\n delta = obs_reduced - pred1_reduced\n \n # Make a note of NA positions in delta, and set them to zero \n # (this avoids warnings when using norm.cdf later)\n na_mask = delta.isna()\n delta[na_mask] = 0\n \n if self.pars[\"prob_method\"] == \"delta_correlation\":\n overall_prob = pd.Series(index=delta.index)\n overall_prob[:] = 1\n \n d_mean = pd.read_csv(\"../data/d_mean.csv\", header=None, \n index_col=0).loc[delta.columns,1]\n d_cov = (pd.read_csv(\"../data/d_cov.csv\", index_col=0).\n loc[delta.columns,delta.columns])\n \n mvn = multivariate_normal(d_mean, d_cov)\n \n overall_prob = mvn.logpdf(delta)\n \n # Penalise missing shifts, unless also missing in predictions\n overall_prob = (overall_prob + log10(default_prob) * \n (na_mask.sum(axis=1) - pred1_reduced.isna().sum()))\n \n else:\n prob = delta.copy()\n prob.iloc[:,:] = 1\n \n for c in delta.columns:\n if self.pars[\"prob_method\"] == \"cdf\":\n # Use the cdf to calculate the probability of a \n # delta *at least* as great as the actual one\n prob[c] = log10(2) + norm.logcdf(-1*abs(\n pd.to_numeric(delta[c])), scale=atom_sd[c]*sf)\n elif self.pars[\"prob_method\"] == \"pdf\":\n prob[c] = norm.logpdf(pd.to_numeric(delta[c]), \n scale=atom_sd[c]*sf) \n elif shift_correlation:\n print(\"shift_correlation not yet implemented. Defaulting to pdf.\")\n prob[c] = norm.logpdf(pd.to_numeric(delta[c]), \n scale=atom_sd[c]*sf)\n else:\n print(\"Method for calculating probability not recognised. 
Defaulting to pdf.\")\n prob[c] = norm.logpdf(pd.to_numeric(delta[c]), \n scale=atom_sd[c]*sf)\n \n # In positions where data was missing, use default probability\n prob[na_mask] = log10(default_prob)\n \n # Calculate penalty for a HADAMAC mismatch\n if use_hadamac:\n # If the i-1 aa type of the predicted residue matches the \n # HADAMAC group of the observation, probability is 1.\n # Otherwise, probability defaults to 0.01\n prob[\"SS_classm1\"] = 0.01\n if type(pred1[\"Res_typem1\"])==str: # dummies have NaN\n prob.loc[obs[\"SS_classm1\"].str.find(\n pred1[\"Res_typem1\"])>=0, \"SS_classm1\"] = 1\n \n # Calculate overall probability of each row\n overall_prob = prob.sum(skipna=False, axis=1)\n \n return(overall_prob)", "def scoring_function(times):\n sorted_times = sorted(times)\n \n diffs = []\n for i in range(len(sorted_times)-1):\n diff = sorted_times[i+1]- sorted_times[i]\n \n if diff == 0.0: # overlaps cannot happen score with a large penalty\n diffs.append(-100)\n elif diff <= 1.0: # punish small differences\n diffs.append(-2)\n elif diff > 4.0: # Gaps greater than 4 are large enough and considered OK\n diffs.append(4.0)\n else:\n diffs.append(diff)\n \n return sum(diffs)", "def overlap_score(labels, labels_pred):\n raw_overlap = 1-fraction_mislabeled_nodes(labels, labels_pred)\n partition_true = np.array(labels).astype(int)\n partition_pred = np.array(labels_pred).astype(int)\n num_nodes = partition_pred.size\n num_groups = partition_true.max() + 1\n\n chance_level = 0.\n for i in range(num_groups):\n temp = np.sum(i == partition_true) / num_nodes\n if temp > chance_level:\n chance_level = temp\n\n score = (raw_overlap - chance_level) / (1 - chance_level)\n if score <= 0:\n score = 0\n\n return score", "def drift_score(self):\n if self.measured_val is None:\n return 0.0\n\n if self.rebalance_type == self.REBALANCE_TYPE_ABSOLUTE:\n return (self.measured_val - self.configured_val) / self.rebalance_thr\n else:\n return ((self.measured_val - self.configured_val) / self.configured_val) / self.rebalance_thr", "def calculate_precinct_score(pt, dstrct):\n return pt.F(dstrct)", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def getScore(self):\n tempscore = 1000 - 0.01*self.timeDriving \n tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore", "def dice_score(ground_truth, prediction):\r\n\r\n # Normalize\r\n prediction /= np.amax(prediction)\r\n ground_truth /= np.amax(ground_truth)\r\n\r\n true_positive_mask = np.logical_and(ground_truth==1, prediction==1)\r\n false_positive_mask = np.logical_and(ground_truth==0, prediction==1)\r\n false_negative_mask = np.logical_and(ground_truth==1, prediction==0)\r\n\r\n TP = np.count_nonzero(true_positive_mask)\r\n FP = np.count_nonzero(false_positive_mask)\r\n FN = np.count_nonzero(false_negative_mask)\r\n\r\n DSC = 2*TP / (2*TP + FP + FN)\r\n\r\n return DSC", "def pct_match(self, s1, s2, comp_length):\n\n matches = self.max_freq[s1:s1+comp_length] \\\n == self.max_freq[s2:s2+comp_length]\n return np.ma.sum(matches) / np.ma.count(matches)", "def RPS(y_true, y_pred) -> float:\n output = 0.\n data_num = len(y_true)\n for i in range(data_num):\n times = len(y_true[i]) - 1 \n cumulative_sum = 0.\n score = 0.\n for time in range(times):\n 
cumulative_sum += y_true[i,time] - y_pred[i,time]\n score += cumulative_sum ** 2\n score /= times\n output += score\n \n output /= data_num\n return output", "def expected_value(held_dice, num_die_sides, num_free_dice):\n list_scores = []\n die_sides = [die for die in range(1, num_die_sides + 1)]\n possible_seq = gen_all_sequences(die_sides, num_free_dice)\n for item in possible_seq:\n list_scores.append(score(held_dice + item))\n \n return float(sum(list_scores)) / len(list_scores)", "def calculate_probability(self):\n return 0", "def getScore(self, seq, start):\n sum = 0.0\n seqdata = seq.getSequence()[start : start+self.cols]\n for pos in range(len(seqdata)):\n q = self.counts[pos].getFreq(seqdata[pos])\n if q == 0:\n q = 0.0001 # to avoid log(0) == -Infinity\n logodds = math.log(q / self.background.getFreq(seqdata[pos]))\n sum += logodds\n return sum", "def delta(tval, tp_confidences, fp_confidences, num_samples):\n tp_percentage = \\\n np.sum([1 for x in tp_confidences if x > tval]) / num_samples\n if fp_confidences:\n fp_percentage = np.sum([1 for x in fp_confidences if x > tval]) / \\\n len(fp_confidences)\n else:\n fp_percentage = 0\n optimal_tp = len(tp_confidences) / num_samples\n delta_value = (tp_percentage - optimal_tp) ** 2 + fp_percentage ** 2\n return delta_value, tp_percentage, fp_percentage", "def percentOverlap(x1, x2):\n nonZeroX1 = np.count_nonzero(x1)\n nonZeroX2 = np.count_nonzero(x2)\n minX1X2 = min(nonZeroX1, nonZeroX2)\n percentOverlap = 0\n if minX1X2 > 0:\n percentOverlap = float(np.dot(x1.T, x2)) / float(minX1X2)\n return percentOverlap", "def mape(true, predictions):\n true = np.array(true)\n predictions = np.array(predictions) \n return np.mean(np.abs((true - predictions)) / true) * 100", "def calculate_percent_match(primers,\n seq_count,\n exclude_seq_count=1):\n # Calculate percent of sequences that are 'hit' by each primer\n for n in range(len(primers)):\n # Calculate percent perfect match\n primers[n].percent_match=float(primers[n].match_count/seq_count)\n primers[n].non_specific_percent=\\\n float(primers[n].non_specific_hits/exclude_seq_count)\n \n return primers", "def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = [number+1 for number in range(num_die_sides)]\n die_seqs = list(gen_all_sequences(outcomes, num_free_dice))\n for idx in range(len(die_seqs)):\n seq = list(die_seqs[idx])\n seq.extend(list(held_dice))\n die_seqs[idx] = tuple(seq)\n scr = 0.0\n for seq in die_seqs:\n scr += score(seq) \n return scr / len(die_seqs)", "def scoreRsrc( self, rr ):\r\n result = 0.0\r\n for tt in self.getSched( )[rr.getid( )]:\r\n for se in tt:\r\n result += 1\r\n print( \"INFO: Value for %s: %s \" % ( rr, result ) )\r\n return( result )", "def p(party, vote_count, s):\n return t(party, vote_count) / d(s)", "def contains_percentage_of(self, other: 'Interval') -> float:\n if other.length == 0:\n return other.a in self\n intersection = Interval.intersection([self, other])\n return intersection.length / other.length if intersection else 0.0", "def _calc_multiple_alignment_score(wrapped_data : tuple) -> int: \n (start, finish) = wrapped_data \n score_sum = 0.\n for dna_record in tqdm(dna_sequences[start : finish + 1], total=(finish + 1 - start), desc=\"Training process\"):\n score_sum += self.aligner.score(seq, dna_record.seq)\n return score_sum", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n die_outcomes = set(range(1, num_die_sides + 1))\r\n \r\n possible_sequences = gen_all_sequences(die_outcomes, num_free_dice)\r\n \r\n 
total_score = 0.0\r\n for sequence in possible_sequences:\r\n total_score += score(held_dice + sequence)\r\n \r\n return float(total_score / len(possible_sequences))", "def _calculate_score(predictions: np.ndarray, correct: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(np.log(predictions + 1) - np.log(correct + 1))) / len(correct))", "def expected_value(held_dice, num_die_sides, num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)", "def tpr(positive, negative, fpr):\n threshold = np.percentile(np.asarray(negative), 100 - fpr)\n total_true_positives = sum(positive > threshold)\n\n return total_true_positives / len(positive)", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n \r\n scores = []\r\n \r\n die_sides = [(die + 1) for die in range(num_die_sides)]\r\n \r\n pos_outcomes = gen_all_sequences(die_sides, num_free_dice)\r\n\r\n for outcome in pos_outcomes:\r\n scores.append(score(held_dice + outcome))\r\n \r\n expected_result = float(sum(scores))/len(scores)\r\n \r\n return expected_result", "def detection_score(self, y_true, y_pred):\n ospa_score = ospa(y_true, y_pred, self.minipatch)\n return 1 - ospa_score", "def discrepancy_score(self, t, s):\n left = np.mean(self.dists[(t, s)])\n right = np.mean(self.dists[(s, t)])\n return 0.5 * (left + right)", "def get_estimated_score(match_data: dict) -> float:\n \n auto_high = {match_data['auto_HighClose']: match_data['auto_conInnerClose'],\n match_data['auto_HighFrontCP']: match_data['auto_conInnerFrontCP'],\n match_data['auto_HighLine']: match_data['auto_conInnerLine']\n }\n auto_low = match_data['auto_Low']\n auto_line = match_data['auto_leftSectorLine']\n \n tele_high = {match_data['tele_HighClose']: match_data['tele_conInnerClose'],\n match_data['tele_HighFrontCP']: match_data['tele_conInnerFrontCP'],\n match_data['tele_HighLine']: match_data['tele_conInnerLine'],\n match_data['tele_HighBackCP']: match_data['tele_conInnerBackCP']\n }\n tele_low = match_data['tele_Low']\n climbed = match_data['tele_Climbed']\n parked = match_data['tele_UnderSG']\n \n score = 0\n \n # Gives autonomous points\n for x in auto_high:\n score += (4.3, 4.8)[auto_high[x]] * x\n score += auto_low * 2\n if auto_line: score += 5\n \n # Gives teleop points\n for x in tele_high:\n score += (2.15, 2.4)[tele_high[x]] * x\n score += tele_low\n \n # Gives endgame points\n if climbed: score += 25\n if parked: score += 5\n \n return score", "def stats(detections, faces):\n vp, fp, fn, vn = 0, 0, 0, 0\n max_label = np.max(faces[:, 0])\n for i in range(max_label + 1):\n detections_i = get_label_with_index(detections, i)\n faces_i = get_label_with_index(faces, i)\n local_vp = 0\n for face in faces_i:\n found = False\n for detection in detections_i:\n if intersection_ratio(face, detection) >= 0.5:\n found = True\n break\n if found:\n vp += 1\n local_vp += 1\n else:\n fn += 1\n fp += len(detections_i) - local_vp\n\n precision = vp / (vp + fp)\n rappel = vp / (vp + fn)\n f_score = 2 * ((precision * rappel) / (precision + rappel))\n\n return precision, rappel, f_score", "def score(self):\n self.set_idx()\n if self.idx:\n diffs = self.diffs()\n weights = self.weights\n return np.sum(weights * diffs) / np.sum(weights)\n else:\n return 0.0", "def precision(ground_truth, prediction):\n ground_truth = remove_duplicates(ground_truth)\n prediction = remove_duplicates(prediction)\n 
precision_score = count_a_in_b_unique(prediction, ground_truth) / float(len(prediction))\n assert 0 <= precision_score <= 1\n return precision_score", "def score(self, predictions):\n return 0.", "def compute_effort(data):\n\t# get only the data (no timestamps)\n\tedata = data[1:8]\n\t(h,w) = np.shape(edata)\n\teffort = 0.0\n\tfor t in range(w):\n\t\tjoint = edata[:,t]\n\t\t#NOTE: used to be 2-norm: norm = np.linalg.norm(joint)\n\t\ttotal = np.sum(np.abs(joint))\n\t\teffort += total\n\n\treturn effort", "def dice_score(seg1, seg2):\n numerator = 2 * tf.reduce_sum(tf.cast(tf.equal(seg1, seg2), tf.int32))\n denominator = tf.size(seg1) + tf.size(seg2)\n score = numerator / denominator\n score = - tf.cast(score, tf.float32)\n return score", "def cal_expected_map(self, ranking_list, total_rel=0):\r\n s = 0.0\r\n pr = 0\r\n pn = 0\r\n for ele in reversed(ranking_list):\r\n rel_doc_cnt = ele[0]\r\n this_doc_cnt = ele[1]\r\n nonrel_doc_cnt = this_doc_cnt - rel_doc_cnt\r\n s += self.A(pr, pn, rel_doc_cnt, nonrel_doc_cnt)\r\n pr += rel_doc_cnt\r\n pn += nonrel_doc_cnt\r\n total_rel += rel_doc_cnt\r\n #print s/total_rel\r\n if total_rel == 0:\r\n return 0\r\n return s/total_rel", "def average_precision(tp,fp,npos):\r\n \r\n fp = np.cumsum(fp)\r\n tp = np.cumsum(tp)\r\n rec = tp / float(npos)\r\n # avoid divide by zero in case the first detection matches a difficult\r\n # ground truth\r\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\r\n \r\n # compute VOC AP using 11 point metric\r\n ap = 0.0\r\n for t in np.arange(0.0, 1.1, 0.1):\r\n if np.sum(rec >= t) == 0:\r\n p = 0\r\n else:\r\n p = np.max(prec[rec >= t])\r\n ap = ap + p / 11.0\r\n\r\n return ap", "def score_method(pairs_true, pairs_test):\n \n set_true = {tuple(e) for e in pairs_true}\n set_test = {tuple(e) for e in pairs_test}\n true_pos, false_pos, false_neg = confusion_stats(set_true, set_test)\n \n total = true_pos + false_pos + false_neg\n true_pos_rate = true_pos / total\n false_pos_rate = false_pos / total\n false_neg_rate = false_neg / total\n \n return true_pos_rate, false_pos_rate, false_neg_rate", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def compute_unique_count_drift(df_prob, ref_df_prob):\n\n df_diff = set(df_prob.keys()) - set(ref_df_prob.keys())\n ref_df_diff = set(ref_df_prob.keys()) - set(df_prob.keys())\n\n return sum([df_prob[k] for k in df_diff] + [ref_df_prob[k] for k in ref_df_diff])", "def expected_value(held_dice, num_die_sides, num_free_dice):\n all_sequences = gen_all_sequences(range(1,num_die_sides+1), num_free_dice)\n iter_seque=[]\n score_seque=[]\n for seq in all_sequences:\n iter_seque.append(list(seq)+list(held_dice))\n score_seque.append(score(iter_seque[-1]))\n return float(sum(score_seque))/float(len(score_seque))", "def calc_stay_prob(rollouts):\n states = rollouts.states\n actions = rollouts.actions\n rewards = rollouts.rewards\n\n num_test_episodes = states.shape[0]\n num_trials = states.shape[1]\n count_trial_stayed = 0.01 + np.zeros((2, 2, num_test_episodes)) # [common/uncommon, reward/unrewarded]\n count_trial_all = 0.01 + np.zeros((2, 2, num_test_episodes))\n for epi in range(num_test_episodes):\n for t in range(0, num_trials-2, 2):\n uncommon_transition = int(actions[epi, t] != states[epi, t+1]-1)\n count_trial_all[uncommon_transition, (0 if rewards[epi, t+1] else 1), epi] += 1\n count_trial_stayed[uncommon_transition, (0 if rewards[epi, t+1] else 1), epi] += \\\n int(actions[epi, t+2] == actions[epi, t])\n return 
np.divide(count_trial_stayed, count_trial_all), count_trial_stayed, count_trial_all", "def _window_hitprobability(self, x, y):\n hm_count = np.zeros_like(y).astype(float)\n hm = np.zeros_like(y).astype(float)\n #skf = StratifiedShuffleSplit(n_splits=self.n_iter, test_size=self.shuffle_test_split, random_state=self.random_state)\n skf = StratifiedShuffleSplit(n_splits=self.n_iter, test_size=self.shuffle_test_split, random_state=self.random_state)\n\n ind = self._cluster(x, x.shape[0])\n ind = np.argsort(ind)\n\n for i in range(self.n_iter):\n # variable window size between 10% and 50%\n window_size = np.random.randint(len(ind)*0.1, len(ind)*0.5)\n train, test = self._window_indexes(ind, window_size)\n self.basemodel.fit(x[train, :], y[train], hyperparams_optim=False)\n hm_count[test] += 1.\n hm[test] += (self.basemodel.predict(x[test, :]) == y[test]).astype(float)\n\n proba = hm / hm_count\n if self.verbose:\n # print('H/M count:')\n # print(hm_count)\n print('Proba:')\n print(proba)\n self.basemodel.fit(x, y, hyperparams_optim=False)\n return proba", "def label_accuracies(preds, labels):\n num_correct = num_correct_fun(preds, labels)\n return (num_correct / preds.size(0)) * 100.0", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def _computeCondProb(self, testData, classValue):\n classAttrObj = self._classAttrs[classValue]\n frequencyDict = classAttrObj.frequencyDict\n totalDocsInClass = classAttrObj.totalDocsInClass\n\n result = (totalDocsInClass/self._totalTrainDocs) # P(c)\n # Compute P(t|c) for each t in d\n for word in testData:\n result *= ((frequencyDict.get(word, 0) + 1) / (sum(frequencyDict.values()) + self._sizeOfVocabulary))\n return result", "def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100", "def score_frames(segs):\n d = dict(D=0, I=0, F=0, M=0, Us=0, Ue=0, Os=0, Oe=0, TP=0, TN=0)\n for seg in segs:\n secs = (parse_date(seg[\"t2\"]) - parse_date(seg[\"t1\"])).total_seconds()\n if seg[\"score\"] == \"FP\" or seg[\"score\"] == \"FN\":\n if seg.get(\"err\"):\n d[seg[\"err\"]] += secs\n elif seg[\"score\"] == \"TP\" or seg[\"score\"] == \"TN\":\n d[seg[\"score\"]] += secs\n\n d[\"P\"] = d[\"D\"] + d[\"F\"] + d[\"Us\"] + d[\"Ue\"] + d[\"TP\"] #positive frames\n d[\"N\"] = d[\"I\"] + d[\"M\"] + d[\"Os\"] + d[\"Oe\"] + d[\"TN\"] #negative frames\n\n #calculate frame ratessiter\n ret = dict(p_rates={}, n_rates={}, frame_counts=d)\n\n POS = [\"D\", \"F\", \"Us\", \"Ue\", \"TP\"]\n NEG = [\"I\", \"M\", \"Os\", \"Oe\", \"TN\"]\n\n if d[\"P\"]:\n for i in POS:\n ret[\"p_rates\"][i+\"r\"] = d[i]*1.0 / d[\"P\"]\n\n if d[\"N\"]:\n for i in NEG:\n ret[\"n_rates\"][i+\"r\"] = d[i]*1.0 / d[\"N\"]\n if d[\"P\"] or d[\"N\"]: \n ret[\"acc\"] = (d[\"TP\"]*1.0 + d[\"TN\"]) / (d[\"P\"]*1.0 + d[\"N\"]) \n ret[\"p_rate\"] = d[\"P\"]*1.0/(d[\"P\"] + d[\"N\"])\n ret[\"n_rate\"] = d[\"N\"]*1.0/(d[\"P\"] + d[\"N\"])\n return ret", "def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc", "def score(self):\n # loop over aminoacids in protein and calculate how often H and C are surrounded by H and C\n for aminoacid in self.aminoacids:\n if aminoacid.aminoacid_type == \"H\":\n self.stability = self.stability + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number))\n 
elif aminoacid.aminoacid_type == \"C\":\n self.stability = self.stability + (-5 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number))\n self.stability = self.stability/2\n return int(self.stability)", "def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))", "def RScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 5\n elif x <= d[p][0.4]:\n return 4\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 2\n else:\n return 1", "def RScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 5\n elif x <= d[p][0.4]:\n return 4\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 2\n else:\n return 1", "def calculate_performance(scorecard):\n \n scorecard_array = numpy.asarray(scorecard)\n performance = scorecard_array.sum() / scorecard_array.size\n\n return performance", "def baseline_score(self,t0,t1):\n return len(set(t0) & set(t1))/len(set(t0).union(set(t1)))", "def score_events(truths, detected, segs):\n\n #1st pass, so-called 'trivial' assignments from segments\n for d in truths+detected:\n for seg in segs:\n if time_overlap(d, seg):\n if seg.get(\"err\") == \"D\":\n d[\"event_score\"] = EVENT_DELETION\n elif seg.get(\"err\") == \"F\":\n d[\"event_score\"] = FRAGMENTED_EVENT\n elif seg.get(\"err\") == \"I\":\n d[\"event_score\"] = INSERTION_RETURN\n elif seg.get(\"err\") == \"M\": \n d[\"event_score\"] = MERGING_RETURN\n\n #2nd pass, overlaps between scored events\n for d in detected:\n for t in truths:\n if time_overlap(d, t):\n if d.get(\"event_score\") == MERGING_RETURN:\n if t.get(\"event_score\") == FRAGMENTED_EVENT:\n t[\"event_score\"] = FRAGMENTED_AND_MERGED\n else:\n t[\"event_score\"] = MERGED_EVENT\n\n if t.get(\"event_score\") == FRAGMENTED_EVENT:\n if d.get(\"event_score\") == MERGING_RETURN:\n d[\"event_score\"] = FRAGMENTING_AND_MERGING\n else:\n d[\"event_score\"] = FRAGMENTING_RETURN\n \n #3rd pass, anything so far unscored is then Correct\n for d in detected+truths:\n if not d.get(\"event_score\"):\n d[\"event_score\"] = CORRECT\n \n #count up the scores\n d_counts = {CORRECT:0,\n FRAGMENTING_RETURN:0,\n MERGING_RETURN:0,\n FRAGMENTING_AND_MERGING:0, \n INSERTION_RETURN:0}\n\n t_counts = {CORRECT:0,\n EVENT_DELETION:0,\n FRAGMENTED_EVENT:0,\n FRAGMENTED_AND_MERGED:0,\n MERGED_EVENT:0}\n\n for d in detected:\n d_counts[d.get(\"event_score\")] += 1\n\n for v in truths:\n t_counts[v.get(\"event_score\")] += 1\n\n return dict(truths=truths, detected=detected, d_counts=d_counts, t_counts=t_counts, d_rates=pct_dict(d_counts), t_rates=pct_dict(t_counts))", "def mapd(self) -> float:\n a = np.sum(np.abs(self.predicted - self.true))\n b = np.sum(np.abs(self.true))\n return float(a / b)", "def get_h_score(start, end):\n #uses a heuristic function\n #return 0 #used if you want Djikstras algorithm\n return (abs(end[0]-start[0])+abs(end[1]-start[1])) * 10", "def depiction_score(self):\n\n collision_penalty = 1\n degenerated_penalty = 0.4\n\n bond_collisions = self.count_bond_collisions()\n degenerated_atoms = self.count_suboptimal_atom_positions(0.0, 0.5)\n\n score = (\n collision_penalty * bond_collisions\n + degenerated_penalty * degenerated_atoms\n )\n\n return round(score, 1)", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n \n for i in d1:\n total = total + d1[i]\n for i in d2:\n if i in d1:\n if total == 0:\n score = score\n else:\n probablility = (d1[i] / total)\n score = score + (math.log10(probablility) * 
d2[i])\n else:\n if total == 0:\n score = score\n else:\n score = score + ((0.5 / total) * d2[i])\n return score", "def calculate_score(origin, sentences):\n result = 0\n if len(origin):\n result=sum(compare_with_bounds(origin, sentence) for sentence in sentences) / float(len(sentences))\n return result", "def compute_rt(rs, pred_prob, h):\r\n rp = rs*pred_prob\r\n rph = rp*h\r\n # probability of change point\r\n cp_prob = rph.sum()\r\n # probability that each run grows\r\n growth_prob = rp - rph\r\n rt = np.r_[cp_prob, growth_prob]\r\n return rt/rt.sum()", "def score(stripe1, stripe2):\n scr = 0\n count = 0\n for p1, p2 in zip(stripe1, stripe2):\n r = abs(p1[0] - p2[0])\n g = abs(p1[1] - p2[1])\n b = abs(p1[2] - p2[2])\n scr += r + g + b\n return scr", "def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break", "def compute_score(self):\n for i in xrange(FRAMES):\n # STRIKE\n if self.frames[i][0] == 10:\n # CONSECUTIVE STRIKE\n if self.frames[i + 1][0] == 10:\n self.scores.append(self.frames[i][0] +\n self.frames[i + 1][0] +\n self.frames[i + 2][0])\n else:\n self.scores.append(self.frames[i][0] +\n self.frames[i + 1][0] +\n self.frames[i + 1][1])\n # SPARE\n elif (self.frames[i][0] + self.frames[i][1] == 10):\n self.scores.append(self.frames[i][0] + self.frames[i][1] +\n self.frames[i + 1][0])\n # NEITHER\n else:\n self.scores.append(self.frames[i][0] + self.frames[i][1])\n # Total Score\n for score in self.scores:\n self.score += score", "def get_score(self, solution: np.array) -> float:\n pass", "def probability(s, a, b):\r\n return s.cdf(b) - s.cdf(a)", "def DCG_p(results, topic, p):\n rel = lambda label: gold_topic_labels[topic][label]\n top_p = results[:p]\n dcg = 0\n for idx, label in enumerate(top_p):\n rank = idx + 1\n if idx == 0:\n dcg += rel(label)\n continue\n dcg += rel(label)/ math.log(rank,2)\n return dcg", "def calc_score(pins_stats):\n count = 0\n new = pins_stats[:, :2] - ORIG_PINS_LOC\n for p in new:\n if np.linalg.norm(p) > R_PIN / 2:\n count += 1\n return count", "def find_pcts(p1, p2, start_b = [], iter = 10000):\n win_record = []\n for i in range(iter):\n deck = Deck()\n need = 5 - len(start_b)\n b2 = draw_sure(deck, need, p1+p2+start_b)\n win_record.append(_who_wins(start_b + b2, p1, p2, printout = False))\n return [win_record.count(1) / float(len(win_record)), \n win_record.count(2) / float(len(win_record))\n ]", "def diversion_score(X, offspring_list):\r\n similarity_sum = 0\r\n if len(offspring_list[0]) == 2:\r\n offspring_list = [(parent_a, offspring, parent_a) for (parent_a, offspring) in offspring_list]\r\n for (parent_a, offspring, parent_b) in offspring_list:\r\n similarity_sum += max(icc(parent_a, offspring), icc(parent_b, offspring))\r\n return (1 - (((similarity_sum / len(offspring_list)) + 1) / 2)) * 100 # move from [-1,1] to [0,2], then to [0,1], then inverse, finally move to [0,100]\r", "def perfect_acc(abst_setpoint, gameboard):\n correct_fraction = (gameboard.ncell - gameboard.pr_mislabel * gameboard.nnoisy) / gameboard.ncell\n predicted_fraction = 1.0 - abst_setpoint\n return np.minimum(1.0, correct_fraction/predicted_fraction)", "def pred_overlap(t, h):\n a_set = set(get_pred(t))\n b_set = set(get_pred(h))\n return len(a_set&b_set)/float(len(a_set|b_set))", "def cps(self):\n return self.datacounts / self.exptime", "def calculate_p(candidate, reference):\n matches = 0\n for grama in candidate:\n if grama in reference:\n matches += 1\n return 
matches/len(candidate)", "def score_results(results):\n truth_time = 0\n truths = results[\"labels\"]\n detected = results[\"detected\"]\n\n for truth in truths:\n truth_time += (parse_date(truth[\"t2\"]) - parse_date(truth[\"t1\"])).seconds\n\n overlapped = False\n overlaps = defaultdict(list) #list of state index that overlap truth keyed on truth index\n\n segs = extract_segments(results)\n segs = score_segments(segs, truths, detected)\n\n return dict(segments=segs,\n frame_score=score_frames(segs),\n events=score_events(truths, detected, segs))", "def __calc_s(self, df):\n df.loc[:, \"avg_num_drivers\"] = df.idle + df.incoming\n s = df.total / df.avg_num_drivers # df.total := amount of demand\n s[s > 1] = 1\n s[np.isnan(s)] = 0.0001\n s[np.isinf(s)] = 1\n\n df.loc[:, \"prob_of_s\"] = s\n df = df[[\"zone_id\", \"prob_of_s\"]]\n return df", "def calculateP(SD, numDiff):\n return numDiff/SD", "def calculateP(SD, numDiff):\n return numDiff/SD", "def cronbach_alpha(self) -> float:\n itemscores = np.stack([self.true, self.predicted])\n itemvars = itemscores.var(axis=1, ddof=1)\n tscores = itemscores.sum(axis=0)\n nitems = len(itemscores)\n return float(nitems / (nitems - 1.) * (1 - itemvars.sum() / tscores.var(ddof=1)))", "def performace_measure(data,pred):\n true = data['clicks']\n weights = weighting(data)\n diff = true-pred.astype(int)\n return np.sqrt(np.inner(weights,diff*diff)/weights.sum())", "def overlapPercent(box1, box2):\n xx2 = min(box1[2], box2[2])\n xx1 = max(box1[0], box2[0])\n yy2 = min(box1[3], box2[3])\n yy1 = max(box1[1], box2[1])\n w = max(0, xx2 - xx1 + 1)\n h = max(0, yy2 - yy1 + 1)\n areaBox1 = boundingBoxArea(box1)\n areaBox2 = boundingBoxArea(box2)\n overlap = max(w * h / areaBox1, w * h / areaBox2)\n return overlap", "def get_accuracy(corrs, events, n_events, outcomes):\n\n ldl_tp = sum([events[i] == outcomes[np.nanargmax(corrs[:, i])] for i in range(n_events)])\n return (100 * ldl_tp) / n_events", "def percentage(self):\n temp = self.cpu_freq_time_spent.copy()\n for i in self.cpu_freq_time_spent:\n total = 0\n for j in self.cpu_freq_time_spent[i]:\n total += self.cpu_freq_time_spent[i][j]\n for j in self.cpu_freq_time_spent[i]:\n if total != 0:\n temp[i][j] = self.cpu_freq_time_spent[i][j] * 100 / total\n else:\n temp[i][j] = 0\n return temp", "def _overlap_energy(self, this, that):\n if not this.overlaps(that):\n return 0.0\n\n return min(10.0 / this.rank, 10.0 / that.rank)", "def expected_value(held_dice, num_die_sides, num_free_dice):\n\n outcome = ()\n for die in range(1, num_die_sides + 1):\n outcome +=(die, )\n possible_outcomes = gen_all_sequences(outcome, num_free_dice)\n output = 0\n for single_output in possible_outcomes:\n current_score = score(single_output + held_dice)\n output += current_score\n\n return output/(len(possible_outcomes)*1.0)", "def calc_prob(data):\n total = len(data)\n frequencies = sorted(Counter(data).items())\n probabilities = OrderedDict()\n for (key, value) in frequencies:\n probabilities[key] = value / total\n return probabilities", "def get_log_odds_score(observed, expected):\n log_ratio = dict()\n for pe in expected.keys():\n try:\n log_ratio[pe] = int(round(2*log(observed[pe]/expected[pe], 2),0))\n except KeyError:\n log_ratio[pe] = int(-99)\n\n return log_ratio", "def calc_match_points(self, match):\n if match.winner == match.TIE:\n match.home.tournament_score += 1\n match.away.tournament_score += 1\n else:\n match.winner.tournament_score += 3\n match.loser.tournament_score += 0", "def mape(self) -> float:\n return 
float(np.mean(np.abs((self.true - self.predicted) / self.true)) * 100)", "def score_professor_conflicts(self):\n prof_conflict_score = 0\n multiplier = 4\n \n for day_num in range(self.num_days):\n \n current_day = self.days[ day_num ]\n num_conflicts = 0\n \n for prof_name in current_day.keys():\n if not self.get_prof_by_name[prof_name].available( day_num ):\n num_conflicts += 1\n \n prof_conflict_score += multiplier * ( num_conflicts ** 2 )\n \n self.prof_conflict_score = prof_conflict_score\n return self.prof_conflict_score", "def set_ts_percentage(self):\n bx = self.get_standard_stats()\n ptos = float(bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n tsAttempts = float(tcInt + (0.44*float(bx[\"tl_int\"])))\n result = 0.00\n if tsAttempts > 0.00:\n result = (ptos/(2*tsAttempts))*100\n self.ts_percentage = \"%.2f\" % round(result, 2)", "def precision(self, user_list):\n hit = 0\n all_recom = 0\n print('Calculate precision: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n recom_item = set([data[0] for data in recom_data])\n user_item = set(\n self.test[self.test['userId'] == user]['movieId'].values)\n overlap = recom_item & user_item\n hit += len(overlap)\n all_recom += len(recom_item)\n print('\\nprecision is: ', hit / (all_recom * 1.0))\n return hit / (all_recom * 1.0)", "def partial_match_score(\n truth: List[Rationale], pred: List[Rationale], thresholds: List[float]\n) -> List[PartialMatchScore]:\n\n ann_to_rat = _keyed_rationale_from_list(truth)\n pred_to_rat = _keyed_rationale_from_list(pred)\n\n num_classifications = {k: len(v) for k, v in pred_to_rat.items()}\n num_truth = {k: len(v) for k, v in ann_to_rat.items()}\n ious: Dict[str, Dict[str, float]] = defaultdict(dict)\n for k in set(ann_to_rat.keys()) | set(pred_to_rat.keys()):\n for p in pred_to_rat.get(k, []):\n best_iou = 0.0\n for t in ann_to_rat.get(k, []):\n num = len(\n set(range(p.start_token, p.end_token))\n & set(range(t.start_token, t.end_token))\n )\n denom = len(\n set(range(p.start_token, p.end_token))\n | set(range(t.start_token, t.end_token))\n )\n iou = 0 if denom == 0 else num / denom\n if iou > best_iou:\n best_iou = iou\n ious[k][p] = best_iou\n\n scores: List[PartialMatchScore] = []\n for threshold in thresholds:\n threshold_tps: Dict[str, float] = {}\n for k, vs in ious.items():\n threshold_tps[k] = sum(int(x >= threshold) for x in vs.values())\n micro_r = (\n sum(threshold_tps.values()) / sum(num_truth.values())\n if sum(num_truth.values()) > 0\n else 0\n )\n micro_p = (\n sum(threshold_tps.values()) / sum(num_classifications.values())\n if sum(num_classifications.values()) > 0\n else 0\n )\n micro_f1 = _f1(micro_r, micro_p)\n macro_rs = list(\n threshold_tps.get(k, 0.0) / n if n > 0 else 0 for k, n in num_truth.items()\n )\n macro_ps = list(\n threshold_tps.get(k, 0.0) / n if n > 0 else 0\n for k, n in num_classifications.items()\n )\n macro_r = sum(macro_rs) / len(macro_rs) if len(macro_rs) > 0 else 0\n macro_p = sum(macro_ps) / len(macro_ps) if len(macro_ps) > 0 else 0\n macro_f1 = _f1(macro_r, macro_p)\n\n scores.append(\n PartialMatchScore(\n threshold=threshold,\n micro=InstanceScore(p=micro_p, r=micro_r, f1=micro_f1),\n macro=InstanceScore(p=macro_p, r=macro_r, f1=macro_f1),\n )\n )\n\n return scores", "def __call__(self, relsSortedByScores, qrelDict):\n result = 0.\n postQty = len(qrelDict)\n\n pos = 0\n for i, rel in enumerate(relsSortedByScores):\n if rel > RELEVANCE_THRESHOLD:\n pos += 1.\n result += pos / 
(i + 1.)\n\n return result / postQty", "def calculate(self):\n\n s_sum = 0\n class_num = len(self.scores)\n \n for i in range(class_num):\n s_sum += self.scores[i]\n\n av = float(s_sum)/class_num\n if av >= 90:\n return 'O'\n elif av >= 80:\n return 'E'\n elif av >= 70:\n return 'A'\n elif av >= 55:\n return 'P'\n elif av >= 40:\n return 'D'\n else:\n return 'T'", "def success_rate(x_tapes):\n return np.sum([is_success(x_tape) for x_tape in x_tapes]) / len(x_tapes)", "def score(self, model, probe):\n return scipy.spatial.distance.euclidean(model, probe)" ]
[ "0.6732613", "0.6518283", "0.6426524", "0.6424136", "0.6334499", "0.6254541", "0.6188233", "0.61744064", "0.6095827", "0.6077343", "0.6045876", "0.5990594", "0.5986692", "0.59864265", "0.59614843", "0.5960128", "0.5936167", "0.5935803", "0.59078395", "0.590509", "0.58760846", "0.58743113", "0.58386606", "0.5834036", "0.5828479", "0.5812159", "0.580872", "0.5802769", "0.5790495", "0.5785718", "0.5773555", "0.57523054", "0.57508934", "0.57501405", "0.57300305", "0.57258797", "0.57253784", "0.5722177", "0.57190233", "0.5716108", "0.5711455", "0.5702197", "0.5702128", "0.5701891", "0.56997013", "0.56956047", "0.5694018", "0.5679468", "0.56740856", "0.5674054", "0.5658515", "0.5658295", "0.5656546", "0.56488526", "0.56488526", "0.5647027", "0.5646325", "0.5645572", "0.5642374", "0.5642203", "0.5636588", "0.56329125", "0.56285715", "0.56277066", "0.5627626", "0.56272095", "0.5625794", "0.5620691", "0.56192684", "0.56052196", "0.5593044", "0.5587239", "0.558619", "0.5585599", "0.5585213", "0.5580891", "0.55771434", "0.5569866", "0.55683357", "0.5564656", "0.5564656", "0.556056", "0.5558965", "0.5558468", "0.5553626", "0.5553031", "0.5548318", "0.55470395", "0.5546349", "0.5545604", "0.5545586", "0.55405825", "0.55353326", "0.5533428", "0.5521619", "0.5515949", "0.55144686", "0.5513158", "0.55109", "0.5504699" ]
0.7712985
0
Combine training and inference datasets as one data frame
def combine_train_infer(train_file, infer_dir): train_df = pd.read_feather(train_file) time_range = range(len([f for f in os.listdir(infer_dir) if 'feather' in f])) infer_df_list = [pd.read_feather(f'{infer_dir}/{t}.feather') for t in time_range] comb_df_list = [] train_df.index = [-1] * len(train_df) comb_df_list.append(train_df) for t in time_range: df = infer_df_list[t] df.index = [t] * len(df) comb_df_list.append(df) return pd.concat(comb_df_list), train_df, infer_df_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_training_data():\n \n X = pd.read_csv('../data/train_values.csv').set_index('sequence_id')\n y = pd.read_csv('../data/train_labels.csv').set_index('sequence_id')\n return X, y", "def triples(self):\n return pd.concat((self._load_train(), self._load_valid(), self._load_test()))", "def _prepare_inference_data(self, df: pd.DataFrame) -> pd.DataFrame:\n # TODO Is the target encoding necessary?\n if len(set(self.target) - set(df.columns)) > 0:\n if self.config.task == \"classification\":\n df.loc[:, self.target] = np.array([self.label_encoder.classes_[0]] * len(df)).reshape(-1, 1)\n else:\n df.loc[:, self.target] = np.zeros((len(df), len(self.target)))\n df, _ = self.preprocess_data(df, stage=\"inference\")\n return df", "def get_training_and_testing_sets(data, Y):\r\n data = pd.concat([data, Y], axis=1)\r\n x,y=data.shape\r\n train_X_sub1=data[0:x//6]\r\n dev_X_sub1 = data[x//6:x//6 + x//12]\r\n test_X_sub1 = data[x//6 + x//12:x//3]\r\n\r\n train_X_sub2 = data[x//3:x//3+x//6]\r\n dev_X_sub2 = data[x//6 + x//3:x//3 + x//6 + x//12]\r\n test_X_sub2 = data[x//3 + x//6 + x//12:2*x//3]\r\n\r\n train_X_sub3 = data[2*x//3:(2*x//3) +x//6]\r\n dev_X_sub3 = data[x//6 + 2*x//3: (2*x//3) + x//6 + x//12]\r\n test_X_sub3 = data[2*x//3 + x//6 + x//12:x]\r\n\r\n train_X=train_X_sub1.append(train_X_sub2,ignore_index = True)\r\n train_X =train_X.append(train_X_sub3,ignore_index = True)\r\n dev_X= dev_X_sub1.append(dev_X_sub2,ignore_index = True)\r\n dev_X = dev_X.append(dev_X_sub3,ignore_index = True)\r\n test_X = test_X_sub1.append(test_X_sub2,ignore_index = True)\r\n test_X = test_X.append(test_X_sub3,ignore_index = True)\r\n\r\n\r\n train_X = util.shuffle(train_X)\r\n train_X = train_X.reset_index(drop=True)\r\n\r\n dev_X = util.shuffle(dev_X)\r\n dev_X = dev_X.reset_index(drop=True)\r\n\r\n test_X = util.shuffle(test_X)\r\n test_X = test_X.reset_index(drop=True)\r\n\r\n train_X_final=train_X\r\n dev_X_final = dev_X\r\n test_X_final = test_X\r\n x, y = train_X_final.shape\r\n train_X = train_X_final.iloc[:, 0:y - 1]\r\n train_Y = train_X_final.iloc[:, y - 1]\r\n\r\n x, y = test_X_final.shape\r\n test_X = test_X_final.iloc[:, 0:y - 1]\r\n test_Y = test_X_final.iloc[:, y - 1]\r\n\r\n x, y = dev_X_final.shape\r\n dev_X = dev_X_final.iloc[:, 0:y - 1]\r\n dev_Y = dev_X_final.iloc[:, y - 1]\r\n\r\n return train_X, train_Y, dev_X,dev_Y,test_X, test_Y", "def get_training_and_validation_df():\n df = get_cleaned_processed_df()\n val_df = pd.DataFrame.from_csv(VALIDATION_DATA_PATH)\n y_train = df.pop(\"label\")\n y_val = val_df.pop(\"label\")\n\n df, val_df = complete_columns(df, val_df)\n df.fillna(0, inplace=True)\n val_df.fillna(0, inplace=True)\n df = fill_text_features(df)\n val_df = fill_text_features(val_df)\n\n df = drop_text_features(df)\n val_df = drop_text_features(val_df)\n return df.values, y_train, val_df.values, y_val", "def split_dataset(df_playlists, df_interactions):\n df_train_pl, cat_pids = generate_train(df_playlists)\n df_test_pl, df_test_itr, df_eval_itr, df_train_itr = generate_test(cat_pids, df_playlists, df_interactions)\n\n return df_train_pl, df_train_itr, df_test_pl, df_test_itr, df_eval_itr", "def load_data():\n train = pd.read_csv(\"../input/train.csv\", dtype={\"Age\": np.float64}, )\n test = pd.read_csv(\"../input/test.csv\", dtype={\"Age\": np.float64}, )\n\n train = train.set_index('PassengerId')\n test = test.set_index('PassengerId')\n\n train = train.apply(preprocess, axis=1)\n test = test.apply(preprocess, axis=1)\n\n x_train = train.drop(['Survived'], axis=1)\n y_train = 
train['Survived']\n x_test = test\n return {'train': {'x': x_train, 'y': y_train},\n 'test': {'x': x_test},\n 'full_features': pd.concat([x_train, x_test])}", "def build_all_datasets(\n cfg, tokenizer, train_valid_test_num_samples,\n):\n train_dataset = RetroQAFineTuneDataset(\n cfg.train_ds.get('file_name'),\n tokenizer,\n cfg.train_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.train_ds.get('seq_length'),\n cfg.train_ds.get('add_bos'),\n cfg.train_ds.get('add_eos'),\n train_valid_test_num_samples[0],\n cfg.train_ds.get('seed'),\n cfg.train_ds.get('neighbors'),\n )\n val_dataset = RetroQAFineTuneDataset(\n cfg.val_ds.get('file_name'),\n tokenizer,\n cfg.val_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.val_ds.get('seq_length'),\n cfg.val_ds.get('add_bos'),\n cfg.val_ds.get('add_eos'),\n train_valid_test_num_samples[1],\n cfg.val_ds.get('seed'),\n cfg.val_ds.get('neighbors'),\n )\n test_dataset = RetroQAFineTuneDataset(\n cfg.test_ds.get('file_name'),\n tokenizer,\n cfg.test_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.test_ds.get('seq_length'),\n cfg.test_ds.get('add_bos'),\n cfg.test_ds.get('add_eos'),\n train_valid_test_num_samples[2],\n cfg.test_ds.get('seed'),\n cfg.test_ds.get('neighbors'),\n )\n\n return train_dataset, val_dataset, test_dataset", "def load_data_wrapper():\n\ttr_d, te_d = load_data()\n\ttraining_inputs = [np.reshape(x, (4, 1)) for x in tr_d[0]]\n\ttraining_results = [vectorized_result(y) for y in tr_d[1]]\n\ttraining_data = zip(training_inputs, training_results)\n\ttest_inputs = [np.reshape(x, (4, 1)) for x in te_d[0]]\n\ttest_data = zip(test_inputs, te_d[1])\n\treturn (training_data, test_data)", "def create_pandas_dataframes():\n train, test = Email.load_emails_from_data()\n\n train_y = [int(t.is_spam) for t in train]\n test_y = [int(t.is_spam) for t in test]\n\n vocab = get_vocabulary_vector(train)\n print(\"[ INF ] Vocab Size:\", len(vocab))\n\n train = [t.vectorize_tokens(vocab) for t in train]\n test = [t.vectorize_tokens(vocab) for t in test]\n\n train = pd.DataFrame.from_records(train, columns=vocab)\n test = pd.DataFrame.from_records(test, columns=vocab)\n\n train['is_spam'] = train_y\n test['is_spam'] = test_y\n\n return train, test", "def train_and_eval_all_models():\n\n clfShape, accShape = shape_symmetry_train_classifier()\n clfTexture, accTexture = texture_symmetry_train_classifier()\n clfFinal, accFinal = combined_symmetry_train_classifier()\n\n return accShape, accTexture, accFinal", "def phase_two_data():\n from pathlib import Path\n try:\n import cPickle as pickle\n except ImportError:\n import pickle\n \n from annotation import parse_fulltext\n from features import ALL_FEATURES\n \n from feature_template import apply_templates\n from feature_selection import filter_by_frequency\n from feature_encoding import encode\n\n # Feature templates considered if heading by 1:\n # ----------------------------\n # Position + Voice\n # Path length + Clause layer\n # 1 Predicate + Path\n # Path + Position + Voice\n # Path + Position + Voice + Predicate\n # 1 Head word stem + Predicate\n # 1 Head word stem + Predicate + Path\n # 1 Head word stem + Phrase\n # Clause layer + Position + Predicate\n templates = [tuple([f.name]) for f in ALL_FEATURES] + \\\n [('path_to_frame', 'frame'), ('head_stem', 'frame'), ('head_stem', 'frame', 'path_to_frame'), ('head_stem', 'phrase_type')]\n \n size = 40\n instances = []\n for i, p in enumerate(Path(\"/cs/fs2/home/hxiao/Downloads/fndata-1.5/fulltext/\").glob(\"*.xml\")):\n if i == size:\n break\n 
sys.stderr.write(\"Processing file: '%s'\\n\" %p.absolute())\n annotations = parse_fulltext(str(p.absolute()))\n instances += make_training_data(ALL_FEATURES, annotations)\n\n sys.stderr.write(\"Feature selection...\\n\")\n x, y = zip(*instances)\n x = apply_templates(x, templates)\n features = filter_by_frequency(x, 5)\n sys.stderr.write(\"Feature encoding...\\n\")\n x, feature_map = encode(x, features)\n \n sys.stderr.write(\"Dumping data...\\n\") \n pickle.dump((x, y, ALL_FEATURES, templates, feature_map), open('dump/test_data.pkl', 'w'))\n import pdb\n pdb.set_trace()\n print len(instances)", "def load_data_wrapper():\n tr_d, va_d, te_d = load_data()\n training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n training_data = zip(training_inputs, training_results)\n validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]\n validation_data = zip(validation_inputs, va_d[1])\n test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]\n test_data = zip(test_inputs, te_d[1])\n return (training_data, validation_data, test_data)", "def load_data_wrapper():\n tr_d, va_d, te_d = load_data()\n training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n training_data = zip(training_inputs, training_results)\n validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]\n validation_data = zip(validation_inputs, va_d[1])\n test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]\n test_data = zip(test_inputs, te_d[1])\n return (training_data, validation_data, test_data)", "def prepare_data_for_training(args):\n # Form the train/test splits and write them to disk\n dataset = data.Dataset(args)\n # get image classes and image counts in each class\n label_map = dataset.get_class_info()\n class_count = len(list(label_map.values()))\n # split the data and store it in log dir\n df_train, df_test = dataset.split_dataset()\n\n # perform dataset augmentations\n image_data = augment.Augmentation(args)\n # get the data gens for training and test images\n train_data_gen, _ = image_data.map_fn_train(df_train)\n test_data_gen, _ = image_data.map_fn_test(df_test)\n\n return train_data_gen, test_data_gen, df_train, df_test, class_count", "def load_dataset_train():\n df_train = load_csv_file(\"31_train.csv\")\n df_train_target = load_csv_file(\"31_target_train.csv\")\n\n return df_train.values, df_train_target.values", "def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)", "def generate_data(self):\n\n column_num = 1\n src_path = self.src_paths_after_pre_process\n target_path = self.tgt_paths_after_pre_process\n\n src_ds = load_textline_dataset([src_path], column_num)\n\n src_ds = src_ds[0]\n\n input_pipeline_func = self.get_input_pipeline(for_export=False)\n\n src_ds = src_ds.map(\n input_pipeline_func, num_parallel_calls=self.num_parallel_calls)\n\n src_size_ds = src_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n src_ds = src_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n if self.infer_without_label:\n data_set = tf.data.Dataset.zip((src_ds, src_size_ds))\n\n else:\n tgt = load_textline_dataset([target_path], column_num)\n tgt = tgt[0]\n tgt_out_ds = tgt.map(lambda x: x + ' ' + self.END_TOKEN)\n tgt_in_ds = tgt.map(lambda x: self.START_TOKEN + ' ' + x)\n\n tgt_in_ds = tgt_in_ds.map(\n lambda batch: 
self.text_pipeline_func(batch, self.max_dec_len, self.\n text_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_size_ds = tgt_in_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_ds = tgt_in_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n inp_ds = tf.data.Dataset.zip(\n (src_ds, src_size_ds, tgt_in_ds, tgt_in_size_ds))\n\n if self.use_label_vocab:\n target_vocab_file_path = self.label_vocab_file_paths[0]\n else:\n target_vocab_file_path = self.text_vocab_file_path\n tgt_out_ds = tgt_out_ds.map(\n lambda batch: self.text_pipeline_func(batch, self.max_dec_len,\n target_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_out_ds = tgt_out_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n data_set = tf.data.Dataset.zip((inp_ds, tgt_out_ds))\n\n vocab_dict = load_vocab_dict(self.text_vocab_file_path)\n vocab_size = len(vocab_dict)\n label_vocab_dict = load_vocab_dict(self.label_vocab_file_paths[0])\n label_vocab_size = len(label_vocab_dict)\n data_size = get_file_len(self.src_paths_after_pre_process)\n self.config['data']['vocab_size'] = vocab_size\n self.config['data']['label_vocab_size'] = label_vocab_size\n self.config['data']['{}_data_size'.format(self.mode)] = data_size\n\n return data_set", "def load_data_wrapper():\n tr_d, va_d, te_d = load_data()\n training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n training_data = list(zip(training_inputs, training_results))\n validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]\n validation_data = list(zip(validation_inputs, va_d[1]))\n test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]\n test_data = list(zip(test_inputs, te_d[1]))\n return (training_data, validation_data, test_data)", "def load_data_wrapper():\n\n tr_d, va_d, te_d = load_data()\n\n training_inputs = [np.reshape(a=x, newshape=(784, 1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n training_data = zip(training_inputs, training_results)\n # Need to do list(zip(...)) instead of just zip(...) 
in Python 3\n # training_data = list(zip(training_inputs, training_results))\n\n validation_inputs = [np.reshape(a=x, newshape=(784, 1)) for x in va_d[0]]\n validation_data = zip(validation_inputs, va_d[1])\n # validation_data = list(zip(validation_inputs, va_d[1]))\n\n test_inputs = [np.reshape(a=x, newshape=(784, 1)) for x in te_d[0]]\n test_data = zip(test_inputs, te_d[1])\n # test_data = list(zip(test_inputs, te_d[1]))\n\n return training_data, validation_data, test_data", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def load_data(params):\n train_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_train_' + params['stemming'] + '.csv']))\n dev_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_dev_' + params['stemming'] + '.csv']))\n train_data, label_encode = data_prep(train_df, params, if_resample=True)\n dev_data, _ = data_prep(dev_df, params)\n return train_data, dev_data, label_encode", "def ConcatDF(train_set, test_set):\n return pd.concat([train_set, test_set], sort=True).reset_index(drop=True)", "def load_dataset(data_dir, model_params, inference_mode=False):\n\n # normalizes the x and y columns using the training set.\n # applies same scaling factor to valid and test set.\n\n if isinstance(model_params.data_set, list):\n datasets = model_params.data_set\n else:\n datasets = [model_params.data_set]\n\n train_strokes = None\n valid_strokes = None\n test_strokes = None\n\n for dataset in datasets:\n if data_dir.startswith('http://') or data_dir.startswith('https://'):\n data_filepath = '/'.join([data_dir, dataset])\n tf.logging.info('Downloading %s', data_filepath)\n response = requests.get(data_filepath)\n data = np.load(six.BytesIO(response.content), encoding='latin1')\n else:\n data_filepath = os.path.join(data_dir, dataset)\n data = np.load(data_filepath, encoding='latin1', allow_pickle=True)\n tf.logging.info('Loaded {}/{}/{} from {}'.format(\n len(data['train']), len(data['valid']), len(data['test']),\n dataset))\n if train_strokes is None:\n train_strokes = data['train']\n valid_strokes = data['valid']\n test_strokes = data['test']\n else:\n train_strokes = np.concatenate((train_strokes, data['train']))\n valid_strokes = np.concatenate((valid_strokes, data['valid']))\n test_strokes = np.concatenate((test_strokes, data['test']))\n\n all_strokes = np.concatenate((train_strokes, valid_strokes, test_strokes))\n num_points = 0\n for stroke in all_strokes:\n num_points += len(stroke)\n avg_len = num_points / len(all_strokes)\n tf.logging.info('Dataset combined: {} ({}/{}/{}), avg len {}'.format(\n len(all_strokes), len(train_strokes), len(valid_strokes),\n len(test_strokes), int(avg_len)))\n\n # calculate the max strokes we need.\n max_seq_len = utils.get_max_len(all_strokes)\n # overwrite the hps with this calculation.\n model_params.max_seq_len = max_seq_len\n\n tf.logging.info('model_params.max_seq_len %i.', model_params.max_seq_len)\n\n eval_model_params = sketch_rnn_model.copy_hparams(model_params)\n\n eval_model_params.use_input_dropout = 0\n 
eval_model_params.use_recurrent_dropout = 0\n eval_model_params.use_output_dropout = 0\n eval_model_params.is_training = 1\n\n if inference_mode:\n eval_model_params.batch_size = 1\n eval_model_params.is_training = 0\n\n sample_model_params = sketch_rnn_model.copy_hparams(eval_model_params)\n sample_model_params.batch_size = 1 # only sample one at a time\n sample_model_params.max_seq_len = 1 # sample one point at a time\n\n train_set = utils.DataLoader(\n train_strokes,\n model_params.batch_size,\n max_seq_length=model_params.max_seq_len,\n random_scale_factor=model_params.random_scale_factor,\n augment_stroke_prob=model_params.augment_stroke_prob)\n\n normalizing_scale_factor = train_set.calculate_normalizing_scale_factor()\n train_set.normalize(normalizing_scale_factor)\n\n valid_set = utils.DataLoader(\n valid_strokes,\n eval_model_params.batch_size,\n max_seq_length=eval_model_params.max_seq_len,\n random_scale_factor=0.0,\n augment_stroke_prob=0.0)\n valid_set.normalize(normalizing_scale_factor)\n\n test_set = utils.DataLoader(\n test_strokes,\n eval_model_params.batch_size,\n max_seq_length=eval_model_params.max_seq_len,\n random_scale_factor=0.0,\n augment_stroke_prob=0.0)\n test_set.normalize(normalizing_scale_factor)\n\n tf.logging.info('normalizing_scale_factor %4.4f.', normalizing_scale_factor)\n\n result = [\n train_set, valid_set, test_set, model_params, eval_model_params,\n sample_model_params\n ]\n return result", "def train_test_data_df(train_data_file, test_data_file):\n dtype_dict = {\n \"age\": np.int32,\n \"education-num\": np.int32,\n \"capital-gain\": np.int32,\n \"capital-loss\": np.int32,\n \"hours-per-week\": np.int32\n }\n cols = [i for i in range(15) if i != 2]\n train_data = pd.read_csv(train_data_file, sep=\", \", header=0, dtype=dtype_dict, na_values=\"?\", usecols=cols)\n train_data = train_data.dropna(axis=0, how=\"any\")\n test_data = pd.read_csv(test_data_file, sep=\", \", header=0, dtype=dtype_dict, na_values=\"?\", usecols=cols)\n test_data = test_data.dropna(axis=0, how=\"any\")\n return train_data, test_data", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n 
self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()", "def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):", "def create_train_feats():\n features = read_process_labelled(AUDIO_DIR, debug=True)\n df = pd.DataFrame(features)\n p = './Features/dataset_features/data_features.csv'\n df.to_csv(p, index=False)\n return p", "def _prepare_ml_data(X, y, to_optimize=False):\n size_test = 1\n y_test = None\n if to_optimize:\n size_test = CONFIG.OPTIMIZE_PARAMS['size'] + 1\n y_test = y.iloc[-size_test:]\n X_train = X.iloc[:-size_test]\n y_train = y.iloc[:-size_test]\n X_test = X.iloc[-size_test:]\n return X_train, y_train, X_test, y_test", "def Zip(datasets):\n return tf.data.Dataset.zip(datasets)", "def get_inference_dataset(dataset_path,debug=False):\n\n if not os.path.exists(dataset_path):\n assert False, \"Couldn't find path : '{}'\".format(dataset_path)\n print(\"\\nprocessing data :'{}'\\n\".format(dataset_path))\n\n path = os.getcwd()\n os.chdir(dataset_path)\n\n dataset = []\n for file in tqdm(os.listdir('.')):\n if not file.endswith('features'):\n continue\n name = file.replace(\".features\", \"\") # removing \"features\"\n x = np.loadtxt(name + '.features')\n np.nan_to_num(x, copy=False)\n #get labels file\n if os.path.exists(name + '.test.labels'):\n labels_file = open(name + '.test.labels').readlines()\n elif os.path.exists(name + '.labels'):\n labels_file = open(name + '.labels').readlines()\n else:\n continue\n file_info = (name , float(labels_file[-2].split(' ')[-1]),\n np.fromstring(labels_file[1].strip(), sep=' ')[:2],\n float(labels_file[2]))#(file name,window_offset,(onset,offset),vot_type)\n\n dataset.append([torch.from_numpy(x).float(), file_info])\n if debug and len(dataset)>100:\n break\n os.chdir(path)\n\n return DataLoader(dataset,shuffle=False)", "def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],\n self._opts['test_classes'],\n is_superclass=self._opts['superclass'],\n class_proportion=self._opts['class_proportion'],\n degrade_test=degrade_test,\n degrade_type=self._opts['degrade_type'], # only relevant if degrade_test = True\n degrade_val=self._opts['min_val'], # only relevant if degrade_test = True\n recurse_train=self._is_train_recursive(),\n recurse_test=self._is_inference_recursive(),\n num_batch_repeats=self._opts['num_repeats'],\n recurse_iterations=self._opts['recurse_iterations'],\n evaluate_step=self._opts['evaluate'],\n use_trainset_for_tests=use_trainset_for_tests,\n invert_images=self._opts['invert_images'],\n 
min_val=self._opts['min_val'])\n return train_dataset, test_dataset", "def combine_all(self):\n combined = copy.deepcopy(self.train)\n\n def _combine_data(data):\n for img_path, pid, camid in data:\n\n if pid in self._junk_pids:\n continue\n #pdb.set_trace()\n pid = self.dataset_name + \"_\" + str(pid)\n camid = self.dataset_name + \"_\" + str(camid)\n combined.append((img_path, pid, camid))\n\n _combine_data(self.query)\n _combine_data(self.gallery)\n\n self.train = combined\n self.num_train_pids = self.get_num_pids(self.train)", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)", "def _process_datasets_all_frames(self):\n datasets = os.listdir(self.separated_root)\n for dataset in datasets:\n dataset_path = join(self.separated_root, dataset)\n\n for model in self.models:\n\n attacks_list = os.listdir(dataset_path)\n\n for attack in attacks_list:\n attack_path = join(dataset_path, attack)\n\n for prop in self.properties:\n property_alias = prop.get_property_alias()\n\n if os.path.exists(\n join(self.output_features, dataset, attack, property_alias, model.alias)):\n print('%s already extracted features' % dataset)\n continue\n\n path_train = join(attack_path, self.train_alias)\n path_test = join(attack_path, self.test_alias)\n\n X_train, y_train, indexes_train, samples_train = self._get_dataset_contents(path_train,\n property_alias)\n X_test, y_test, indexes_test, samples_test = self._get_dataset_contents(path_test,\n property_alias)\n\n output_features = join(self.output_features, dataset, attack, property_alias, model.alias)\n\n features_train = self._fetch_features(X_train, model, output_features, self.train_alias)\n features_test = self._fetch_features(X_test, model, output_features, self.test_alias)\n\n # saving features\n np.save(join(output_features, (NAME_FEATURES % self.train_alias)), features_train)\n np.save(join(output_features, (NAME_FEATURES % self.test_alias)), features_test)\n\n # saving targets\n np.save(join(output_features, (NAME_TARGETS % self.train_alias)), y_train)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n\n # saving samples names\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.train_alias)), samples_train)\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.test_alias)), samples_test)", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = 
TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def run(train_file: str, val_file: str):\n logging.info(f'Training data from: {train_file}')\n train_df = pd.read_csv(train_file)\n logging.info(f'Validation data from: {val_file}')\n val_df = pd.read_csv(val_file)\n print(train_df.head(3))\n train_data = pd.DataFrame({\n 'QueryLength': train_df['Query'].str.len(),\n 'UnigramCount': train_df['Unigram Count'],\n 'Hamming': train_df['Hamming'],\n 'Relevant': train_df['Is Relevant']\n })\n val_data = pd.DataFrame({\n 'QueryLength': val_df['Query'].str.len(),\n 'UnigramCount': val_df['Unigram Count'],\n 'Hamming': val_df['Hamming'],\n 'Relevant': val_df['Is Relevant']\n })\n logging.info('Sample prepared data')\n print(train_data.head(3))\n logging.info('Summary of traininging data')\n print(train_data.info())\n logging.info('Summary of validation data')\n print(train_data.info())\n train_ds = to_dataset(train_data)\n val_ds = to_dataset(val_data)\n feature_columns = []\n headers = ['QueryLength', 'UnigramCount', 'Hamming']\n for header in headers:\n feature_columns.append(feature_column.numeric_column(header))\n feature_layer = tf.keras.layers.DenseFeatures(feature_columns)\n model = tf.keras.Sequential([\n feature_layer,\n layers.Dense(128, activation='relu'),\n layers.Dense(128, activation='relu'),\n layers.Dropout(.1),\n layers.Dense(1)\n ])\n model.compile(optimizer='adam',\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=['accuracy'])\n model.fit(train_ds, validation_data=val_ds, epochs=10)\n #y_pred = model.predict(train_ds)\n #cm = confusion_matrix(headers, y_pred)\n #print(cm)", "def load_data(args) -> pd.DataFrame:\n\n df = pd.read_csv(os.path.join(args.data_dir, args.training_file), delimiter=\"\\t\").sample(frac=1, random_state=args.random_seed).reset_index(drop=True)\n df_test = pd.read_csv(os.path.join(args.data_dir, args.testing_file), delimiter=\"\\t\")\n\n # startified validation split\n if not args.use_custom_split:\n train_df, valid_df = train_test_split(\n df, stratify=df[args.label_col], test_size=args.split_size\n )\n # add is_valid column\n train_df[args.validation_col] = False\n valid_df[args.validation_col] 
= True\n df = pd.concat([train_df, valid_df]).reset_index(drop=True)\n # free up memory\n del train_df, valid_df\n\n return df, df_test", "def load_train_test() -> Tuple[pd.DataFrame, pd.DataFrame]:\n df = load_data() \n #Sample\n train_df = df.sample(frac=0.8, random_state=42)\n test_df = df[~df.index.isin(train_df.index)]\n #Rescale\n train_mean = train_df.iloc[:, :-1].mean()\n train_std = train_df.iloc[:, :-1].std()\n train_df.iloc[:, :-1] = (train_df.iloc[:, :-1] - train_mean) / train_std\n test_df.iloc[:, :-1] = (test_df.iloc[:, :-1] - train_mean) / train_std\n return train_df, test_df", "def fetch_adult_df(preprocess=False):\n (train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(\n \"adult\", \"classification\", astype=\"pandas\", preprocess=preprocess\n )\n orig_X = pd.concat([train_X, test_X]).sort_index()\n orig_y = pd.concat([train_y, test_y]).sort_index()\n if preprocess:\n sex = pd.Series(orig_X[\"sex_Male\"] == 1, dtype=np.float64)\n race = pd.Series(orig_X[\"race_White\"] == 1, dtype=np.float64)\n dropped_X = orig_X.drop(\n labels=[\n \"race_Amer-Indian-Eskimo\",\n \"race_Asian-Pac-Islander\",\n \"race_Black\",\n \"race_Other\",\n \"race_White\",\n \"sex_Female\",\n \"sex_Male\",\n ],\n axis=1,\n )\n encoded_X = dropped_X.assign(sex=sex, race=race)\n assert not encoded_X.isna().any().any()\n assert not orig_y.isna().any().any()\n fairness_info = {\n \"favorable_labels\": [1],\n \"protected_attributes\": [\n {\"feature\": \"sex\", \"reference_group\": [1]},\n {\"feature\": \"race\", \"reference_group\": [1]},\n ],\n }\n return encoded_X, orig_y, fairness_info\n else:\n fairness_info = {\n \"favorable_labels\": [\">50K\"],\n \"protected_attributes\": [\n {\"feature\": \"race\", \"reference_group\": [\"White\"]},\n {\"feature\": \"sex\", \"reference_group\": [\"Male\"]},\n ],\n }\n return orig_X, orig_y, fairness_info", "def method_2():\n encoded_parts = np.array_split(encoded[:train_size], batch_size) # split it into n batches\n datasets = [] # a list of n datasets, e.g [{1,2,3,4,5}, {6,7,8,9,10}, {11,12,13,14,15}, ...]\n for encoded_part in encoded_parts:\n dataset = tf.data.Dataset.from_tensor_slices(encoded_part)\n dataset = dataset.window(window_length, shift=n_steps, drop_remainder=True)\n dataset = dataset.flat_map(lambda window: window.batch(window_length))\n datasets.append(dataset)\n dataset = tf.data.Dataset.zip(tuple(datasets)) # e.g. 
{{1,6,11,...}, {2,7,12,...}, {3,8,13,...}, ...}\n dataset = dataset.map(lambda *windows: tf.stack(windows)) # there're n windows in one take, so use * to pack them\n # after this step, one take will yield (32, None)\n dataset = dataset.repeat().map(lambda windows: (windows[:, :-1], windows[:, 1:]))\n dataset = dataset.map(\n lambda X_batch, Y_batch: (tf.one_hot(X_batch, depth=max_id), Y_batch))\n dataset = dataset.prefetch(1)\n return dataset", "def data():\n\n run_type = 'standardised'\n sr = 48000\n train_perc = 0.9\n\n if sr == 48000:\n time_dimension = 282\n if sr == 44100:\n time_dimension = 259\n\n x_train, y_train, x_test, y_test = essential.compile_dataset(run_type, sr)\n\n # reshape for CNN input\n x_train = np.array([x.reshape((128, time_dimension, 1)) for x in x_train])\n x_test = np.array([x.reshape((128, time_dimension, 1)) for x in x_test])\n\n # encoded \n encoder = LabelEncoder()\n encoder.fit(y_train)\n encoder.fit(y_test)\n y_train = encoder.transform(y_train)\n y_test = encoder.transform(y_test)\n\n return x_train, y_train, x_test, y_test", "def _data_zip(train_data, eval_data, test_data):\n return {\n tf.estimator.ModeKeys.TRAIN: train_data,\n tf.estimator.ModeKeys.EVAL: eval_data,\n tf.estimator.ModeKeys.INFER: test_data,\n }", "def get_datasets(config: ModelSettings, df: pd.DataFrame):\n train_filenames = df.loc[df.train_data == 1, \"filename\"].values\n val_filenames = df.loc[df.val_data == 1, \"filename\"].values\n test_filenames = df.loc[df.test_data == 1, \"filename\"].values\n\n train_zspacings = df.loc[df.train_data == 1, \"pixel_spacingz\"].values\n val_zspacings = df.loc[df.val_data == 1, \"pixel_spacingz\"].values\n test_zspacings = df.loc[df.test_data == 1, \"pixel_spacingz\"].values\n\n train_dataset = BPRDataset(\n data_path=config.data_path,\n filenames=train_filenames,\n z_spacings=train_zspacings,\n landmark_path=config.landmark_path,\n landmark_sheet_name=\"landmarks-train\",\n random_seed=config.random_seed,\n custom_transform=config.custom_transform,\n albumentation_transform=config.albumentation_transform,\n equidistance_range=config.equidistance_range,\n num_slices=config.num_slices,\n )\n\n val_dataset = BPRDataset(\n data_path=config.data_path,\n filenames=val_filenames,\n z_spacings=val_zspacings,\n landmark_path=config.landmark_path,\n landmark_sheet_name=\"landmarks-val\",\n random_seed=config.random_seed,\n custom_transform=config.custom_transform,\n albumentation_transform=config.albumentation_transform,\n equidistance_range=config.equidistance_range,\n num_slices=config.num_slices,\n )\n\n test_dataset = BPRDataset(\n data_path=config.data_path,\n filenames=test_filenames,\n z_spacings=test_zspacings,\n landmark_path=config.landmark_path,\n landmark_sheet_name=\"landmarks-test\",\n random_seed=config.random_seed,\n custom_transform=config.custom_transform,\n albumentation_transform=config.albumentation_transform,\n equidistance_range=config.equidistance_range,\n num_slices=config.num_slices,\n )\n\n return train_dataset, val_dataset, test_dataset", "def load_training_as_df(\n key: str, source_base_path: str = SOURCE_BASE_PATH, include_validation: bool = False) -> pd.DataFrame:\n\n train = _load_target_and_source(key, source_base_path, 'train')\n\n if include_validation:\n validation = _load_target_and_source(key, source_base_path, 'validation')\n return pd.concat([train, validation])\n else:\n return train", "def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n 
tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test", "def load_data(include_extra=False):\n\n if not os.path.exists(outfile_train):\n download_svhn(url_train, outfile_train)\n\n if not os.path.exists(outfile_test):\n download_svhn(url_test, outfile_test)\n\n mat = sp.io.loadmat(outfile_train)\n x_train = mat['X']\n y_train = mat['y']\n\n mat = sp.io.loadmat(outfile_test)\n x_test = mat['X']\n y_test = mat['y']\n\n if include_extra:\n if not os.path.isfile(outfile_extra):\n download_svhn_extra()\n mat_e = sp.io.loadmat(outfile_extra)\n x_train = np.concatenate((x_train, mat_e['X']), axis=-1)\n y_train = np.concatenate((y_train, mat_e['y']), axis=0)\n\n x_train = preprocess(x_train)\n x_test = preprocess(x_test)\n y_train[y_train == 10] = 0\n y_test[y_test == 10] = 0\n y_test = np.squeeze(y_test)\n y_train = keras.utils.to_categorical(y_train)\n y_train = y_train.astype('float32')\n\n return x_train, y_train, x_test, y_test, [str(i) for i in range(10)]", "def run_train_test_split():\n # Load all documents\n conn = sq.connect(config.DB_FILE)\n documents = pd.read_sql_query('select pubmed_id, review_id, included, title, abstract from article ', conn)\n\n # Identify unique review IDs\n review_ids = documents['review_id'].unique()\n\n # Set seed for random sampling\n np.random.seed(2)\n\n # List of Reviews in the partial data set and full data set\n partial_set = list(np.random.choice(review_ids, 10, replace=False))\n full_set = list(review_ids.copy())\n\n # Load array (X) and labels (Y) of all documents\n with (open(config.DOC_TERM_MATRIX, \"rb\")) as openfile:\n X = pickle.load(openfile)\n\n y = documents['included']\n\n # Train-test split of the partial dataset\n train_test_split(X, y, partial_set, 'min_max', 'partial', review_ids)\n train_test_split(X, y, partial_set, 'tf_idf', 'partial', review_ids)\n\n # Train-test split of the full dataset\n train_test_split(X, y, full_set, 'min_max', 'full', review_ids)\n train_test_split(X, y, full_set, 'tf_idf', 'full', review_ids)", "def inference(self, dataset, model_dir):\n raise NotImplementedError", "def make_training_set(ind_list, training_data): \n \n exp = training_data[ind_list[0]] \n X_train = exp[0]\n u_train = exp[1] \n\n for i in ind_list[1:]: \n exp = training_data[i]\n X_train = np.append(X_train, exp[0], axis=0)\n u_train = np.append(u_train, exp[1], axis=0)\n\n return X_train, u_train", "def _inference_results(self, labels, predicts, sample_weights, sample_ids, predicts_per_coordinate):\n predicts = predicts.flatten()\n if labels is not None:\n labels = labels.flatten()\n if predicts_per_coordinate is not None:\n predicts_per_coordinate = predicts_per_coordinate.flatten()\n batch_size = predicts.size\n params = self.schema_params\n records = []\n for i in range(batch_size):\n record = {params.prediction_score_column_name: predicts[i], params.weight_column_name: sample_weights[i],\n params.uid_column_name: sample_ids[i]}\n if labels is not None:\n record[params.label_column_name] = labels[i]\n if predicts_per_coordinate is not None:\n record[params.prediction_score_per_coordinate_column_name] = predicts_per_coordinate[i]\n records.append(record)\n return records", "def inference(model, data, diagnostics, seed, extra_fitting_args):\n pass", "def get_training_data(self):\n\n # this actually never was a set\n # src_set = self.target['src'].values\n # dst_set = self.target['dst'].values\n\n # train_negative = self.get_negative_edges(src_set, dst_set, 
self.train_ind.shape[0]) # * self.K)\n # test_negative = self.get_negative_edges(src_set, dst_set, self.test_ind.shape[0])\n\n train_positive = self.target.iloc[self.train_edge_ind].values\n test_positive = self.target.iloc[self.test_edge_ind].values\n\n # # print(train_positive.shape, train_negative.shape, test_positive.shape, test_negative.shape)\n # print(f\"Working with {train_positive.shape[0]} positive and {train_negative.shape[0]} negative samples in the train set, {test_positive.shape[0]} and {test_negative.shape[0]} - in test set\")\n\n X_train = train_positive\n X_test = test_positive\n\n y_train = np.ones((self.train_edge_ind.shape[0],))\n y_test = np.ones((self.test_edge_ind.shape[0],))\n\n # X_train = np.vstack([\n # train_positive,\n # train_negative\n # ])\n\n # X_test = np.vstack([\n # test_positive,\n # test_negative\n # ])\n\n # y_train = np.concatenate([np.ones((self.train_ind.shape[0],)), np.zeros((self.train_ind.shape[0]),)]) # self.train_ind.shape[0]) * self.K\n # y_test = np.concatenate([np.ones((self.test_ind.shape[0],)), np.zeros((self.test_ind.shape[0],))])\n\n assert X_train.shape[0] == y_train.shape[0]\n assert X_test.shape[0] == y_test.shape[0]\n\n def shuffle(X, y):\n ind_shuffle = np.arange(0, X.shape[0])\n np.random.shuffle(ind_shuffle)\n return X[ind_shuffle], y[ind_shuffle]\n\n self.X_train, self.y_train = shuffle(X_train, y_train)\n self.X_test, self.y_test = shuffle(X_test, y_test)\n\n print(f\"Splitting into {self.X_train.shape[0]} train and {self.X_test.shape[0]} test samples\")\n\n # return X_train, X_test, y_train, y_test", "def _ExtractInputs(batched_extract: types.Extracts,\n eval_config: config.EvalConfig) -> types.Extracts:\n result = copy.copy(batched_extract)\n (record_batch, serialized_examples) = (\n _DropUnsupportedColumnsAndFetchRawDataColumn(\n batched_extract[constants.ARROW_RECORD_BATCH_KEY]))\n dataframe = record_batch.to_pandas()\n\n # In multi-output model, the keys (labels, predictions, weights) are\n # keyed by output name. In this case, we will have a nested dict in the\n # extracts keyed by the output names.\n def _get_proj_df_dict(original, keys_dict, allow_missing=False): # pylint: disable=invalid-name\n df_proj = pd.DataFrame()\n for output_name, key in keys_dict.items():\n if key in dataframe:\n df_proj[output_name] = original[key]\n elif allow_missing:\n df_proj[output_name] = [None] * len(serialized_examples)\n return df_proj.to_dict(orient='records')\n\n def _add_proj_df(proj_df, result, key): # pylint: disable=invalid-name\n if proj_df.shape[1] == 0:\n return\n elif proj_df.shape[1] == 1:\n result[key] = proj_df[proj_df.columns[0]]\n else:\n result[key] = proj_df.to_dict(orient='records')\n\n labels_df = pd.DataFrame()\n example_weights_df = pd.DataFrame()\n predictions_df = pd.DataFrame()\n for spec in eval_config.model_specs:\n # Note that we allow the label_key to be unset as some metrics don't use\n # labels. 
We also allow the label_key to missing from the inputs since the\n # label handling logic may be handled by downstream extractors.\n if spec.label_key:\n if spec.label_key in dataframe:\n labels_df[spec.name] = dataframe[spec.label_key]\n else:\n labels_df[spec.name] = [None] * len(serialized_examples)\n elif spec.label_keys:\n labels_df[spec.name] = _get_proj_df_dict(dataframe, spec.label_keys, True)\n else:\n labels_df[spec.name] = [None] * len(serialized_examples)\n\n if spec.example_weight_key:\n example_weights_df[spec.name] = dataframe[spec.example_weight_key]\n elif spec.example_weight_keys:\n example_weights_df[spec.name] = _get_proj_df_dict(\n dataframe, spec.example_weight_keys)\n\n if spec.prediction_key and spec.prediction_key in dataframe:\n predictions_df[spec.name] = dataframe[spec.prediction_key]\n elif spec.prediction_keys:\n proj_df_dict = _get_proj_df_dict(dataframe, spec.prediction_keys)\n if proj_df_dict:\n predictions_df[spec.name] = _get_proj_df_dict(dataframe,\n spec.prediction_keys)\n\n _add_proj_df(labels_df, result, constants.LABELS_KEY)\n _add_proj_df(example_weights_df, result, constants.EXAMPLE_WEIGHTS_KEY)\n _add_proj_df(predictions_df, result, constants.PREDICTIONS_KEY)\n\n # Add a separate column with the features dict.\n result[constants.FEATURES_KEY] = dataframe.to_dict(orient='records')\n\n # TODO(pachristopher): Consider avoiding setting this key if we don't need\n # this any further in the pipeline. This can avoid a potentially costly copy.\n result[constants.INPUT_KEY] = serialized_examples\n return result", "def prepare_data(train, test):\n # change the name of the target column\n train.rename(columns={\"revenue\": \"target\"}, inplace=True)\n # map bool values to yes and no\n train[\"Weekend\"] = train[\"Weekend\"].map({True: \"Yes\", False: \"No\"})\n test[\"Weekend\"] = test[\"Weekend\"].map({True: \"Yes\", False: \"No\"})\n # set the id col as index\n train.set_index(\"id\", inplace=True)\n test.set_index(\"id\", inplace=True)\n\n # seperate the fetures and the target\n X_train = train.drop(\"target\", axis=1).copy()\n y_train = train[\"target\"].copy()\n X_test = test.copy()\n\n # select numerical and categorical columns\n num_cols = X_train.select_dtypes(exclude=\"object\").columns.tolist()\n cat_cols = X_train.select_dtypes(include=\"object\").columns.tolist()\n\n # numerical pipeline\n num_pipe = make_pipeline(SimpleImputer(strategy=\"mean\"))\n\n # categorical pipeline\n cat_pipe = make_pipeline(\n SimpleImputer(strategy=\"constant\", fill_value=\"NA\"),\n OneHotEncoder(handle_unknown=\"ignore\", sparse=False),\n )\n\n # full pipeline for data preprocessing\n full_pipe = ColumnTransformer(\n [(\"num\", num_pipe, num_cols), (\"cat\", cat_pipe, cat_cols)]\n )\n return X_train, y_train, X_test, full_pipe", "def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels", "def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = 
pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)", "def create_tf_datasets(self):\n images = []\n labels = []\n\n images = self.dataframe_labeled_samples.index.values\n\n labels.append(\n tuple(self.dataframe_labeled_samples['Intersection'].values.astype('uint8')))\n\n images = [\n os.path.join(\n os.path.dirname(\n self.summary_manager.current_labelgui_summary_filepath),\n img_name) for img_name in images]\n labels = list(chain.from_iterable(labels))\n\n\n if self.validation_split == 0:\n images = np.array([\n self.image_preprocessor(\n imageio.imread(f)) for f in tqdm(images)])\n images = tf.data.Dataset.from_tensor_slices(images)\n labels = tf.data.Dataset.from_tensor_slices(labels)\n dataset = tf.data.Dataset.zip((images, labels))\n return dataset, None\n\n images, images_val, labels, labels_val = train_test_split(\n images, labels, test_size=self.validation_split, random_state=0)\n\n train_split_filename = ((\n f'{self.save_checkpoint_filepath or self.checkpoint_filepath}'\n f'_train_split.txt'\n ))\n print(f\"Saving train split files to: {train_split_filename}\")\n with open(train_split_filename, 'w+')\\\n as train_split_file:\n for img in images:\n train_split_file.write(img + '\\n')\n \n val_split_filename = ((\n f'{self.save_checkpoint_filepath or self.checkpoint_filepath}'\n f'_val_split.txt'\n ))\n print(f\"Saving train split files to: {val_split_filename}\")\n with open(val_split_filename, 'w+')\\\n as val_split_file:\n for img in images_val:\n val_split_file.write(img + '\\n')\n\n print(f\"Loading validation image paths ({len(images)}) with preprocessor\")\n images = np.array([\n self.image_preprocessor(\n imageio.imread(f)) for f in tqdm(images)])\n images = tf.data.Dataset.from_tensor_slices(images)\n\n print(f\"Loading labels into tf tensor\")\n labels = tf.data.Dataset.from_tensor_slices(labels)\n print(f\"Creating zipped dataset with images and labels\")\n dataset = tf.data.Dataset.zip((images, labels))\n\n print(f\"Loading validation image paths ({len(images_val)}) with preprocessor\")\n images_val = np.array([\n self.image_preprocessor(\n imageio.imread(f)) for f in tqdm(images_val)])\n #images_val = np.array([self.image_preprocessor(f) for f in tqdm(images_val)])\n images_val = tf.data.Dataset.from_tensor_slices(images_val)\n #images_val = tf.data.Dataset.list_files(images_val)\n #images_val = images_val.map(tf.io.read_file)\n print(f\"Loading validation labels into tf tensor\")\n labels_val = tf.data.Dataset.from_tensor_slices(labels_val)\n print(f\"Creating validation zipped dataset with images and labels\")\n dataset_val = tf.data.Dataset.zip((images_val, labels_val))\n\n return dataset, dataset_val", "def make_tf_datasets(data_df, target_col):\n train, test = train_test_split(data_df, test_size=0.2)\n train, val = train_test_split(train, test_size=0.2)\n\n print(len(train), 'train examples')\n print(len(test), 'test examples')\n\n batch_size = 256\n train_ds = df_to_train_dataset(train, target_col, shuffle=True, batch_size=batch_size)\n val_ds = df_to_train_dataset(val, target_col, shuffle=False, batch_size=batch_size)\n test_ds = df_to_train_dataset(test, target_col, 
shuffle=False, batch_size=batch_size)\n\n return train_ds, val_ds, test_ds", "def concat_all_evaluation_results(list_of_folders):\n\n\n train_eval_df_list = []\n val_eval_df_list = []\n train_val_eval_df_list = []\n\n\n for item in list_of_folders:\n path_to_eval_folder = os.path.join(EMBEDDING_DEST, item)\n files = os.listdir(path_to_eval_folder)\n\n for f in files:\n\n # for each evaluation result csv file, see whether it is from training set, or validation set, or training+validation\n if f.endswith(\"image_level_evaluation_result_top_tri.csv\"):\n\n if \"random\" in f:\n if \"random_training_validation\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_val_eval_df_list.append(df)\n\n elif \"random_training\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_eval_df_list.append(df)\n\n\n elif \"random_validation\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n val_eval_df_list.append(df)\n\n\n else:\n if \"triplet\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_val_eval_df_list.append(df)\n\n elif \"training\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_eval_df_list.append(df)\n\n elif \"validation\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n val_eval_df_list.append(df)\n\n\n # add 'training_' or 'validation_' to the column names of evaluation results coming from training and validation sets.\n # This is to be able to distinguish them in the final general csv file.\n\n columns = list(train_val_eval_df_list[0])\n train_columns = [\"training_\"+item for item in columns[1:]]\n train_columns = [columns[0]] + train_columns\n train_columns_dict ={}\n \n val_columns = [\"validation_\"+item for item in columns[1:]]\n val_columns = [columns[0]] + val_columns\n val_columns_dict ={}\n\n #train_and_val_columns = [\"train_and_validation_\"+item for item in columns[1:]]\n #train_and_val_columns = [columns[0]] + train_and_val_columns\n #train_and_val_columns_dict ={}\n\n\n for i in range(len(columns)):\n train_columns_dict[columns[i]] = train_columns[i]\n val_columns_dict[columns[i]] = val_columns[i]\n #train_and_val_columns_dict[columns[i]] = train_and_val_columns[i]\n\n\n concatenated_training_df = pd.concat(train_eval_df_list, sort=False)\n concatenated_training_df = concatenated_training_df.rename(columns=train_columns_dict)\n\n concatenated_validation_df = pd.concat(val_eval_df_list, sort=False)\n concatenated_validation_df = concatenated_validation_df.rename(columns=val_columns_dict)\n \n concatenated_train_and_validation_df = pd.concat(train_val_eval_df_list, sort=False)\n #concatenated_train_and_validation_df = concatenated_train_and_validation_df.rename(columns=train_and_val_columns_dict)\n\n\n concatenated_training_df.to_csv(os.path.join(EMBEDDING_DEST,\"compare_with_no_sz\", \"training_all_evaluation_result_top_tri.csv\"),index=None)\n concatenated_validation_df.to_csv(os.path.join(EMBEDDING_DEST, \"compare_with_no_sz\", \"validation_all_evaluation_result_top_tri.csv\"),index=None)\n concatenated_train_and_validation_df.to_csv(os.path.join(EMBEDDING_DEST,\"compare_with_no_sz\",\"training_and_validation_all_evaluation_result_top_tri.csv\"), index=None)\n\n # ---------\n # If you have columns on arguments, keep them in training but drop them in validation and train_and_val to prevent duplicates\n list_of_cols_in_validation_df = list(concatenated_validation_df)\n list_of_cols_in_train_val_df = list(concatenated_train_and_validation_df)\n args_cols = 
get_json_argument_list()\n\n args_cols_val = [\"validation_\"+item for item in args_cols]\n \n if len(list_of_cols_in_train_val_df) == len(list_of_cols_in_validation_df) and len(list_of_cols_in_train_val_df) > 7:\n concatenated_validation_df = concatenated_validation_df.drop(args_cols_val, axis=1, errors='ignore')\n concatenated_train_and_validation_df = concatenated_train_and_validation_df.drop(args_cols, axis=1, errors='ignore')\n\n\n # ---------\n\n all_three_df_list = [concatenated_training_df, concatenated_validation_df, concatenated_train_and_validation_df]\n concatenated_all_df = pd.concat(all_three_df_list, axis=1)\n concatenated_all_df.to_csv(os.path.join(EMBEDDING_DEST,\"compare_with_no_sz\", \"all_evaluation_result_top_tri.csv\"), index=None)", "def inference(self):\r\n\t\tfor partition, loader in self.loaders.items():\r\n\t\t\tavg_loss, (y, y_hat), post, attentions, tags = self.eval_loader(\r\n\t\t\t\tloader)\r\n\t\t\tself.preds[partition] = {\r\n\t\t\t\t'tag': tags,\r\n\t\t\t\t'y': y,\r\n\t\t\t\t'y_hat': y_hat,\r\n\t\t\t\t# 'posteriors': post,\r\n\t\t\t\t# 'attentions': attentions\r\n\t\t\t}", "def train_model(self):\n labels = np.zeros((0, 1))\n left_data = np.zeros((0, self.feature_num))\n right_data = np.zeros((0, self.feature_num))\n for i, speaker in enumerate(self.speakers):\n speaker_data = self.load_data(speaker)\n\n if len(speaker_data.shape) == 3:\n left_channel = speaker_data[0]\n else:\n left_channel = speaker_data\n\n speaker_labels = np.reshape(np.array([i for x in range(len(left_channel))]), (-1, 1))\n\n labels = np.vstack((labels, speaker_labels))\n left_data = np.vstack((left_data, left_channel))\n\n if self.both_channels:\n right_channel = speaker_data[1]\n right_data = np.vstack((right_data, right_channel))\n\n labels = np.reshape(labels, (labels.shape[0],))\n\n self.left_model.fit(left_data, labels)\n if self.both_channels:\n self.right_model.fit(right_data, labels)", "def get_data():\r\n if not path_validation(MODEL_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_DATA_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_LABEL_PATH, read_access=True):\r\n exit(0) \r\n\r\n params = joblib.load(MODEL_PATH)\r\n test_images = np.load(TEST_DATA_PATH)\r\n test_labels = np.load(TEST_LABEL_PATH)\r\n\r\n # Addition of bias in test set\r\n test_images = np.insert(test_images, 0, 1, axis=1)\r\n\r\n return params, test_images, test_labels", "def Train_data():\n print (\"loading train data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n with h5py.File(join(data_root, './data/train_real2.h5')) as f:\n data_real = f['train_real'][:]\n num, nt, ny, nx = data_real.shape\n data_real = np.transpose(data_real, (0, 1, 3, 2))\n with h5py.File(join(data_root, './data/train_imag2.h5')) as f:\n data_imag = f['train_imag'][:]\n num, nt, ny, nx = data_imag.shape\n data_imag = np.transpose(data_imag, (0, 1, 3, 2))\n data = data_real+1j*data_imag\n num_train = 15000\n num_validate = 2000\n train_data = data[0:num_train]\n validate_data = data[num_train:num_train+num_validate]\n\n train_data = np.random.permutation(train_data)\n\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end-time_start))\n return train_data, validate_data", "def ConcatDF(train_set, test_set):\n df_all = pd.concat([train_set, test_set], sort=True).reset_index(drop=True)\n df_all.trn_len = train_set.shape[0]\n return df_all", "def create_train_test(dataframe_all):\n label_encoder=LabelEncoder()\n split = 
StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=42)\n for train_index, test_index in split.split(dataframe_all['word_values'], dataframe_all['document_label']):\n strat_train_set = dataframe_all.loc[train_index]\n strat_test_set = dataframe_all.loc[test_index]\n\n strat_train_set = strat_train_set.dropna(subset=['word_values'])\n strat_test_set = strat_test_set.dropna(subset=['word_values'])\n pipe=su.pipe()\n x_train, y_train = pipe.fit_transform(strat_train_set), label_encoder.fit_transform(\n strat_train_set['document_label'])\n x_test, y_test = pipe.transform(strat_test_set), label_encoder.fit_transform(\n strat_test_set['document_label'])\n\n return x_train,x_test,y_train,y_test", "def get_data():\n iris = datasets.load_iris()\n xall = np.asarray(iris[\"data\"], dtype=np.float64)\n yall = np.asarray(iris[\"target\"], dtype=np.float64)\n xall = np.vstack([xall, (7, 2.0, 4.5, 1)])\n yall = np.append(yall, n_classes)\n X, Xval, y, yval = train_test_split(\n xall, yall, test_size=0.2, shuffle=True, random_state=12345\n )\n y = tf.one_hot(y, n_classes)\n yval = tf.one_hot(yval, n_classes)\n return X, y, Xval, yval", "def data(self):\n (x_train, y_train), (_, _) = datasets.fashion_mnist.load_data()\n x_train = x_train.reshape((-1, 28, 28, 1))\n x_train, y_train = x_train.astype('float16') / 255.0, \\\n tf.keras.utils.to_categorical(y_train.astype('float16'), 10)\n (x_train, x_eval) = x_train[5000:], x_train[:5000]\n (y_train, y_eval) = y_train[5000:], y_train[:5000]\n train_data, eval_data = (x_train, y_train), (x_eval, y_eval)\n return train_data, eval_data", "def get_preprocessed_data(result_path, para_index):\n para_path = result_path + '/' + str(para_index)\n X_train_dir = para_path + '/X_train_important.csv'\n y_train_dir = para_path + '/y_train.csv'\n X_test_dir = para_path + '/X_test_important.csv'\n y_test_dir = para_path + '/y_test.csv'\n X_train = pd.read_csv(X_train_dir, index_col = 0)\n y_train = pd.read_csv(y_train_dir, index_col = 0)\n X_test = pd.read_csv(X_test_dir, index_col = 0)\n y_test = pd.read_csv(y_test_dir, index_col = 0)\n return X_train, y_train, X_test, y_test", "def get_311_data():\n # reading in data and saving to separate DFs\n source = spark.read.csv(\"source.csv\", sep=\",\", header=True, inferSchema=True)\n case = spark.read.csv(\"case.csv\", sep=\",\", header=True, inferSchema=True)\n dept = spark.read.csv(\"dept.csv\", sep=\",\", header=True, inferSchema=True)\n\n # returning DFs\n return source, case, dept", "def PrepareSets(args, tokenizer, train_set, dev_set, test_set, first_label=False):\n\n # filter out al instances where the emotion is neutral\n train_set = train_set.filter(lambda example: not 27 in example['labels'])\n dev_set = dev_set.filter(lambda example: not 27 in example['labels'])\n test_set = test_set.filter(lambda example: not 27 in example['labels'])\n\n # remove unnecessary columns\n train_set = train_set.remove_columns(['text', 'id'])\n dev_set = dev_set.remove_columns(['text', 'id'])\n test_set = test_set.remove_columns(['text', 'id'])\n\n # function that creates new instances for all labels\n def handle_multiple_labels(batch):\n new_batch = {'attention_mask': [],\n 'input_ids': [],\n 'labels': [],\n 'token_type_ids': [],\n }\n for instance_idx, instance in enumerate(batch['labels']):\n for label in instance:\n new_batch['attention_mask'].append(batch['attention_mask'][instance_idx])\n new_batch['input_ids'].append(batch['input_ids'][instance_idx])\n new_batch['labels'].append(label)\n 
new_batch['token_type_ids'].append(batch['token_type_ids'][instance_idx])\n return new_batch\n\n # function that takes the first label\n def handle_first_label(batch):\n batch['labels'] = batch['labels'][0]\n return batch\n\n # check which label function to use\n if first_label:\n label_fn = handle_first_label\n batched = False\n else:\n label_fn = handle_multiple_labels\n batched = True\n\n # filter the labels\n train_set = train_set.map(label_fn, batched=batched)\n dev_set = dev_set.map(label_fn, batched=batched)\n test_set = test_set.map(label_fn, batched=batched)\n\n # return the prepared datasets\n return train_set, dev_set, test_set", "def GetDataset():\n x_train = []\n x_test = []\n y_train = []\n y_test = []\n\n classes1 = set()\n classes2 = set()\n for f in GetInputFiles():\n class1, class2, fold, fname = f.split('\\\\')[-4:]\n classes1.add(class1)\n classes2.add(class2)\n class1 = class1.split('_')[0]\n class2 = class2.split('_')[0]\n\n x = ReadAndTokenize(f)\n y = [int(class1 == 'positive'), int(class2 == 'truthful')]\n if fold == 'fold4':\n x_test.append(x)\n y_test.append(y)\n else:\n x_train.append(x)\n y_train.append(y)\n\n ### Make numpy arrays.\n x_test = MakeDesignMatrix(x_test)\n x_train = MakeDesignMatrix(x_train)\n y_test = numpy.array(y_test, dtype='float32')\n y_train = numpy.array(y_train, dtype='float32')\n\n dataset = (x_train, y_train, x_test, y_test)\n with open('dataset.pkl', 'wb') as fout:\n pickle.dump(dataset, fout)\n return dataset", "def prepare_class_data_for_prediction(dataframe, model_dict, user_keyword, task_name):\r\n test_tweets = dataframe.iloc[:, [0, 1, 2]]\r\n\r\n parent_dir = Path.cwd().parent\r\n pickle_dir = parent_dir.joinpath('default_results', 'pickle_files_feat_eng')\r\n feature_X_user = pd.DataFrame\r\n emo_X_test_dict = {}\r\n\r\n\r\n for emotion, model_prop in model_dict.items():\r\n preprocessed_X_user = Preprocessor.perform(test_tweets, emotion, user_keyword, task_name)\r\n feature_X_user = Feature_Transformer.perform(preprocessed_X_user, emotion, user_keyword, task_name)\r\n vectorizer = Dictionaries.vectorizer_dict[model_prop[2]]\r\n\r\n #Fit transform the vectorizer with the corresponding preprocessed training data\r\n if os.path.exists(pickle_dir.joinpath(emotion + '_c_train_preprocess_df.pkl')):\r\n preprocess_train_df = pd.read_pickle(pickle_dir.joinpath(emotion + '_c_train_preprocess_df.pkl'))\r\n train_vect = vectorizer.fit_transform(preprocess_train_df['preprocessed_text'].values)\r\n print(emotion + 'TRAIN', train_vect.shape)\r\n train_vect_df = pd.DataFrame(train_vect.toarray(), columns=vectorizer.get_feature_names())\r\n else:\r\n #If the file doesnt exist, exit the program with instructions\r\n print('\\nRequired files does not exist.\\n\\n Please, train the models first by running > Modelling.py')\r\n sys.exit(1)\r\n\r\n # Use the same vectorizer to transform test data and then perform the feature union\r\n vector_X = vectorizer.transform(preprocessed_X_user['preprocessed_text'].values)\r\n test_vect_df = pd.DataFrame(vector_X.toarray(), columns=vectorizer.get_feature_names())\r\n X_test = pd.concat([test_vect_df, feature_X_user], axis=1)\r\n emo_X_test_dict[emotion] = X_test\r\n print(emotion + 'TEST', test_vect_df.shape, X_test.shape)\r\n return emo_X_test_dict", "def fetch_speeddating_df(preprocess=False):\n (train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(\n \"SpeedDating\", \"classification\", astype=\"pandas\", preprocess=preprocess\n )\n orig_X = pd.concat([train_X, test_X]).sort_index()\n orig_y = 
pd.concat([train_y, test_y]).sort_index()\n if preprocess:\n importance_same_race = pd.Series(\n orig_X[\"importance_same_race\"] >= 9, dtype=np.float64\n )\n samerace = pd.Series(orig_X[\"samerace_1\"] == 1, dtype=np.float64)\n dropped_X = orig_X.drop(labels=[\"samerace_0\", \"samerace_1\"], axis=1)\n encoded_X = dropped_X.assign(\n samerace=samerace, importance_same_race=importance_same_race\n )\n fairness_info = {\n \"favorable_labels\": [1],\n \"protected_attributes\": [\n {\"feature\": \"samerace\", \"reference_group\": [1]},\n {\"feature\": \"importance_same_race\", \"reference_group\": [1]},\n ],\n }\n return encoded_X, orig_y, fairness_info\n else:\n fairness_info = {\n \"favorable_labels\": [\"1\"],\n \"protected_attributes\": [\n {\"feature\": \"samerace\", \"reference_group\": [\"1\"]},\n {\"feature\": \"importance_same_race\", \"reference_group\": [[9, 1000]]},\n ],\n }\n return orig_X, orig_y, fairness_info", "def make_dataset(self):\n # Read raw data\n data = self.read_raw_data()\n self.default_header = list(data.columns.values)\n # Fit the variables on the raw dataset\n self.fit(data.copy())\n return make_df(data, self.features), make_df(data, self.targets)", "def train_model(train_table: pd.DataFrame, viewed_table: pd.DataFrame) -> List:\n\n \"\"\"\"\"\"\n # construct numpy array\n #nan_data = train_table.replace(0.0, np.NaN).to_numpy()\n #user_ratings_mean = np.nanmean(nan_data, axis=1)\n unviewed_table = viewed_table.apply(lambda x: 1 - x)\n # unviewed = unviewed_table.to_numpy()\n\n # construct numpy array\n data = train_table.to_numpy()\n user_ratings_mean = np.mean(data, axis=1)\n # factors in individual interpretation of the scale\n data_demeaned = data - user_ratings_mean.reshape(-1, 1)\n\n # use scipy sparse's svd to avoid 'killed: 9' memory issues\n U, sigma, Vt = svds(data_demeaned, k=25)\n\n sigma = np.diag(sigma)\n\n all_predictions = np.dot(np.dot(U, sigma), Vt) + user_ratings_mean.reshape(-1, 1)\n all_predictions_table = pd.DataFrame(all_predictions).set_index(viewed_table.index)\n all_predictions_table.set_axis(viewed_table.columns, axis='columns', inplace=True)\n\n\n # given already viewed movies a rating of 0. 
Note these will still be taken ahead of adverse movies\n predictions_table = pd.DataFrame(np.multiply(all_predictions_table,\n unviewed_table.to_numpy()).set_index(viewed_table.index))\n predictions_table.set_axis(viewed_table.columns, axis='columns', inplace=True)\n\n return [all_predictions_table, predictions_table]", "def model_data():\n x_train, y_train, x_val, y_val, x_test, y_test = read_data(\"src/tests/dataclassificationmodel/ferPlus_processed.pbz2\", False)\n return x_train, y_train, x_val, y_val, x_test, y_test", "def get_datasets():\n # Get data\n dataframe = pandas.read_csv('agaricus-lepiota.data')\n\n # Convert data type for all variables\n for column in dataframe:\n dataframe[column] = pandas.Categorical(dataframe[column])\n dataframe[column] = dataframe[column].cat.codes\n\n # Get labels\n target = dataframe.pop('p')\n\n # Get tensors, and split data into training and test sets\n split = int(len(dataframe) * 0.8)\n train_dataset = tf.data.Dataset.from_tensor_slices(\n (dataframe[:split].values, target[:split].values))\n\n train_dataset = train_dataset.shuffle(len(dataframe)).batch(1)\n\n test_dataset = tf.data.Dataset.from_tensor_slices(\n (dataframe[split:].values, target[split:].values))\n\n test_dataset = test_dataset.shuffle(len(dataframe)).batch(1)\n\n return train_dataset, test_dataset", "def get_data():\n\n pathxtrain = sys.argv[1]\n pathxtest = sys.argv[2]\n pathlabeltrain = sys.argv[3]\n pathlabeltest = sys.argv[4]\n\n xtrain = p.read_csv(pathxtrain, header=None)\n xtest = p.read_csv(pathxtest, header=None)\n label_train = p.read_csv(pathlabeltrain, header=None)\n label_test = p.read_csv(pathlabeltest, header=None)\n\n xtrain_mx = xtrain.values\n xtest_mx = xtest.values\n\n label_train = label_train.values.reshape(label_train.shape[0])\n label_test = label_test.values.reshape(label_test.shape[0])\n\n return xtrain_mx, xtest_mx, label_train, label_test", "def pre_process_df_and(train_data, test_data):\n train_data[\"text\"] = train_data[\"sentence1\"] + \", \" + train_data[\"sentence2\"] # noqa\n test_data[\"text\"] = test_data[\"sentence1\"] + \", \" + test_data[\"sentence2\"]\n train_data.drop([\"sentence1\", \"sentence2\"], axis=1, inplace=True)\n test_data.drop([\"sentence1\", \"sentence2\"], axis=1, inplace=True)\n train_data = train_data[[\"text\", \"label\"]]\n test_data = test_data[[\"text\", \"label\"]]\n simple_pre_process_text_df(train_data)\n simple_pre_process_text_df(test_data)\n return train_data, test_data", "def prepare_datasets(target_filename='data'):\n data_cornell = np.array(datasets.readCornellData('__data__/cornell/', max_len=1000000))\n data_opensubs = np.array(datasets.readOpensubsData('__data__/opensubs/', max_len=1000000))\n\n data = np.concatenate([data_cornell, data_opensubs], axis=0)\n del data_cornell, data_opensubs\n\n pd.DataFrame(data, columns=('question', 'answer')).to_feather('__data__/'+target_filename+'.feather')", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = 
DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def inference(self, inputs):\n # NOTE: This makes the assumption that your model expects text to be tokenized\n # with \"input_ids\" and \"token_type_ids\" - which is true for some popular transformer models, e.g. bert.\n # If your transformer model expects different tokenization, adapt this code to suit\n # its expected input format.\n input_ids = inputs[\"input_ids\"]\n input_ids = input_ids.to(self.device)\n\n coarse_result = self.model.generate(input_ids = input_ids, )\n coarse_result = coarse_result.to(\"cpu\")\n fined_result = self.tokenizer.decode(coarse_result[0].tolist()[inputs[\"original_length\"]+1:],\n skip_special_tokens = True)\n #logger.info(\"Model predicted: '%s'\", fined_result)\n\n return [fined_result]", "def predict_all():\n\n # need train dir to list category names\n cfg = configparser.ConfigParser()\n cfg.read(sys.argv[1])\n base = os.environ['DATA_ROOT']\n eval_type = cfg.get('args', 'eval_type')\n train_xml_dir = os.path.join(base, cfg.get('data', 'train_xml_dir'))\n\n if eval_type == 'sparse':\n predict_sparse(train_xml_dir)\n else:\n predict_dense(train_xml_dir)", "def load_data_wrapper():\r\n \r\n global training_inputs, training_results\r\n global validation_inputs, validation_results\r\n global test_inputs, test_results\r\n global num_samples, numpixels, num_test_samples\r\n \r\n tr_d, va_d, te_d = load_data()\r\n \r\n num_samples=len(tr_d[0])\r\n training_inputs=zeros([num_samples,numpixels])\r\n training_results=zeros([num_samples,10]) \r\n for j in range(num_samples):\r\n training_inputs[j,:] = reshape(tr_d[0][j], (numpixels))\r\n training_results[j,:] = vectorized_result(tr_d[1][j])\r\n# validation_inputs = [reshape(x, (numpixels)) for x in va_d[0]]\r\n# validation_results = [vectorized_result(y) for y in va_d[1]]\r\n\r\n num_test_samples=len(te_d[0])\r\n test_inputs=zeros([num_test_samples,numpixels])\r\n test_results=zeros([num_test_samples,10]) \r\n for j in range(num_test_samples):\r\n test_inputs[j,:] = reshape(te_d[0][j], (numpixels))\r\n test_results[j,:] = vectorized_result(te_d[1][j])", "def make_data(input_filepath, output_filepath):\n\n df_train = pd.read_csv(input_filepath+'train_u6lujuX_CVtuZ9i.csv', index_col=0)\n df_test = pd.read_csv(input_filepath+'test_Y3wMUE5_7gLdaTN.csv', index_col=0)\n print('Sizes', df_train.shape, df_test.shape)\n print(\"Outcome dispersion:\\n\", df_train['Loan_Status'].value_counts())\n\n\n # recode and save outcome vector\n y = df_train['Loan_Status'].map({'N': 0, 'Y': 1})\n\n del df_train['Loan_Status']\n\n # all in one dataframe\n df = pd.concat([df_train, df_test])\n print(df.shape)\n\n from src.features.build_features import make_features\n df = make_features(df)\n\n # Divide data on train and test again and save\n data_train = df[df.index.isin(df_train.index)]\n data_test = df[df.index.isin(df_test.index)]\n print(data_train.shape, data_test.shape)\n\n data_tmp = data_train.copy()\n data_tmp['y'] = y\n\n\n data_tmp.to_csv(output_filepath + 'train_ready.csv', index=False)\n data_test.to_csv(output_filepath + 'test_ready.csv', index=False)\n id_test = pd.DataFrame(data=df_test.index, columns=['Loan_ID'])\n id_test.to_csv(output_filepath + 'id_test.csv', index=False)", "def 
get_adv_classification_dataset(self) -> pd.DataFrame:\n pass", "def fetch_titanic_df(preprocess=False):\n (train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(\n \"titanic\", \"classification\", astype=\"pandas\", preprocess=preprocess\n )\n orig_X = pd.concat([train_X, test_X]).sort_index()\n orig_y = pd.concat([train_y, test_y]).sort_index()\n if preprocess:\n sex = pd.Series(orig_X[\"sex_female\"] == 1, dtype=np.float64)\n age = pd.Series(orig_X[\"age\"] <= 18, dtype=np.float64)\n dropped_X = orig_X.drop(labels=[\"sex_female\", \"sex_male\"], axis=1)\n encoded_X = dropped_X.assign(sex=sex, age=age)\n fairness_info = {\n \"favorable_labels\": [1],\n \"protected_attributes\": [\n {\"feature\": \"sex\", \"reference_group\": [1]},\n {\"feature\": \"age\", \"reference_group\": [1]},\n ],\n }\n return encoded_X, orig_y, fairness_info\n else:\n fairness_info = {\n \"favorable_labels\": [\"1\"],\n \"protected_attributes\": [\n {\"feature\": \"sex\", \"reference_group\": [\"female\"]},\n {\"feature\": \"age\", \"reference_group\": [[0, 18]]},\n ],\n }\n return orig_X, orig_y, fairness_info", "def create_full_predictions_dataframe(self):\r\n print('\\nSaving Full Predictions as dataframes...')\r\n with open(path.deployment_full_predictions, 'r') as file:\r\n self.full_predictions = json.load(file)\r\n self.read_full_predictions('SVM').to_json(path.deployment_svm_test_results)\r\n print('\\t|--SVM results saved to {}'.format(path.deployment_svm_test_results))", "def train(self, training_data):\n pass", "def get_dataset(self):\n\n trainset = datasets.SVHN('datasets/SVHN/train/', split='train', transform=self.train_transforms,\n target_transform=None, download=True)\n valset = datasets.SVHN('datasets/SVHN/test/', split='test', transform=self.val_transforms,\n target_transform=None, download=True)\n extraset = datasets.SVHN('datasets/SVHN/extra', split='extra', transform=self.train_transforms,\n target_transform=None, download=True)\n\n trainset = torch.utils.data.ConcatDataset([trainset, extraset])\n\n return trainset, valset", "def split_dataset_for_triplet_networks(ds_train, ds_test, ds_info):\n\n def pairing(anchor, positive, negative):\n \"\"\"- normalizes images: `uint8` -> `float32`.\n\n Returns:\n the left image, right image and label in `float32`\n \"\"\"\n return tf.cast(anchor[\"image\"], tf.float32) / 255., tf.cast(positive[\"image\"], tf.float32) / 255., tf.cast(negative[\"image\"], tf.float32)\n\n # initialize a dictionary to store all dataset\n dataset = dict()\n\n # calculate the number of pairs in training, test and validation sset\n num_train = int(ds_info.splits['train'].num_examples / 3 * 0.8)\n num_val = int(ds_info.splits['train'].num_examples / 3 * 0.2)\n num_test = int(ds_info.splits['test'].num_examples / 3)\n\n # split train set into train and validation\n anchor_train = ds_train.take(num_train)\n positive_train = ds_train.skip(num_train).take(num_train)\n negative_train = ds_train.skip(num_train*2).take(num_train)\n train = tf.data.Dataset.zip((anchor_train, positive_train, negative_train))\n\n anchor_val = ds_train.skip(num_train*3).take(num_val)\n positive_val = ds_train.skip(num_train*3 + num_val).take(num_val)\n negative_val = ds_train.skip(num_train*3 + num_val*2).take(num_val)\n val = tf.data.Dataset.zip((anchor_val, positive_val, negative_val))\n\n anchor_test = ds_test.take(num_test)\n positive_test = ds_test.skip(num_test).take(num_test)\n negative_test = ds_test.skip(num_test*2).take(num_test)\n train = tf.data.Dataset.zip((anchor_test, positive_test, 
negative_test))\n\n # store anchor images, positive images and negative images to three numpy array\n # training set\n train_x_anchor = []\n train_x_positive = []\n train_x_negative = []\n\n for anchor, positive, negative in train:\n anchor_x, positive_x, negative_x = pairing(anchor, positive, negative)\n\n train_x_anchor.append(anchor_x)\n train_x_positive.append(positive_x)\n train_x_negative.append(negative_x)\n\n train_x_anchor = np.array(train_x_anchor)\n train_x_positive = np.array(train_x_positive)\n train_x_negative = np.array(train_x_negative)\n\n # store to dictionary\n dataset[\"train_x_anchor\"] = train_x_anchor\n dataset[\"train_x_positive\"] = train_x_positive\n dataset[\"train_x_negative\"] = train_x_negative\n\n # val set\n val_x_anchor = []\n val_x_positive = []\n val_x_negative = []\n\n for anchor, positive, negative in val:\n anchor_x, positive_x, negative_x = pairing(anchor, positive, negative)\n\n val_x_anchor.append(anchor_x)\n val_x_positive.append(positive_x)\n val_x_negative.append(negative_x)\n\n val_x_anchor = np.array(val_x_anchor)\n val_x_positive = np.array(val_x_positive)\n val_x_negative = np.array(val_x_negative)\n\n # store to dictionary\n dataset[\"val_x_anchor\"] = val_x_anchor\n dataset[\"val_x_positive\"] = val_x_positive\n dataset[\"val_x_negative\"] = val_x_negative\n\n\n # test set\n test_x_anchor = []\n test_x_positive = []\n test_x_negative = []\n\n for anchor, positive, negative in val:\n anchor_x, positive_x, negative_x = pairing(anchor, positive, negative)\n\n test_x_anchor.append(anchor_x)\n test_x_positive.append(positive_x)\n test_x_negative.append(negative_x)\n\n test_x_anchor = np.array(test_x_anchor)\n test_x_positive = np.array(test_x_positive)\n test_x_negative = np.array(test_x_negative)\n\n # store to dictionary\n dataset[\"test_x_anchor\"] = test_x_anchor\n dataset[\"test_x_positive\"] = test_x_positive\n dataset[\"test_x_negative\"] = test_x_negative\n\n return dataset", "def _prepare_for_training(\n self,\n trackers: List[TrackerWithCachedStates],\n domain: Domain,\n precomputations: MessageContainerForCoreFeaturization,\n **kwargs: Any,\n ) -> Tuple[RasaModelData, np.ndarray]:\n training_trackers = self._get_trackers_for_training(trackers)\n # dealing with training data\n tracker_state_features, label_ids, entity_tags = self._featurize_for_training(\n training_trackers,\n domain,\n precomputations=precomputations,\n bilou_tagging=self.config[BILOU_FLAG],\n **kwargs,\n )\n\n if not tracker_state_features:\n return RasaModelData(), label_ids\n\n self._label_data, encoded_all_labels = self._create_label_data(\n domain, precomputations=precomputations\n )\n\n # extract actual training data to feed to model\n model_data = self._create_model_data(\n tracker_state_features, label_ids, entity_tags, encoded_all_labels\n )\n\n if self.config[ENTITY_RECOGNITION]:\n self._entity_tag_specs = (\n self.featurizer.state_featurizer.entity_tag_specs\n if self.featurizer.state_featurizer is not None\n else []\n )\n\n # keep one example for persisting and loading\n self.data_example = model_data.first_data_example()\n\n return model_data, label_ids", "def readData():\n pd.set_option('display.expand_frame_repr', False)\n # read data from training_text\n df_text = pd.read_csv('training_text', sep = '\\|\\|', index_col= 'ID',skip_blank_lines =True, nrows = 10, header = None, skiprows = 1, names = ['ID', 'Text'], engine = 'python', encoding = 'utf-8', dtype = str)\n print(\"TEXT COUNT - \" + str(df_text.count()))\n print(\"MISSING TEXT\")\n missing_text 
= df_text.isnull().sum()\n print(missing_text)\n \n # read data from training_variants\n df_variants = pd.read_csv('training_variants', skip_blank_lines =True, nrows = 10, index_col= 'ID', header = None, skiprows = 1, names = ['ID','Gene','Variation','Class'], engine = 'python', encoding = 'utf-8', dtype = str)\n print(\"VARIANTS COUNT - \" + str(df_variants.count()))\n print(\"MISSING VARIANTS\")\n missing_variants = df_variants.isnull().sum()\n print(missing_variants)\n # merge both datasets\n df = pd.concat([df_text, df_variants], axis = 1)\n return df", "def prepare_train_validation(self) -> Tuple:\n Xt, Xv, Yt, Yv = self.dataset.train_test_split_representations()\n\n Xt = self.dataset.prepare_input_samples(Xt)\n Yt = self.dataset.prepare_output_samples(Yt)\n traindataset = tf.data.Dataset.from_tensor_slices((Xt, Yt))\n traindataset = traindataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n Xv = self.dataset.prepare_input_samples(Xv)\n Yv = self.dataset.prepare_output_samples(Yv)\n validdataset = tf.data.Dataset.from_tensor_slices((Xv, Yv))\n validdataset = validdataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n return traindataset, validdataset", "def generate_datasets(self) -> (tf.data.Dataset, tf.data.Dataset):\n self.obtain_meta_data_frame_for_available_lightcurves()\n positive_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] == 'PC']['lightcurve_path']\n print(f'{len(positive_example_paths)} positive examples.')\n negative_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] != 'PC']['lightcurve_path']\n print(f'{len(negative_example_paths)} negative examples.')\n positive_datasets = self.get_training_and_validation_datasets_for_file_paths(positive_example_paths)\n positive_training_dataset, positive_validation_dataset = positive_datasets\n negative_datasets = self.get_training_and_validation_datasets_for_file_paths(negative_example_paths)\n negative_training_dataset, negative_validation_dataset = negative_datasets\n training_dataset = self.get_ratio_enforced_dataset(positive_training_dataset, negative_training_dataset,\n positive_to_negative_data_ratio=1)\n validation_dataset = positive_validation_dataset.concatenate(negative_validation_dataset)\n if self.trial_directory is not None:\n self.log_dataset_file_names(training_dataset, dataset_name='training')\n self.log_dataset_file_names(validation_dataset, dataset_name='validation')\n training_dataset = training_dataset.shuffle(buffer_size=len(list(training_dataset)))\n training_preprocessor = lambda file_path: tuple(tf.py_function(self.training_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n training_dataset = training_dataset.map(training_preprocessor, num_parallel_calls=16)\n training_dataset = training_dataset.padded_batch(self.batch_size, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n validation_preprocessor = lambda file_path: tuple(tf.py_function(self.evaluation_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n validation_dataset = validation_dataset.map(validation_preprocessor, num_parallel_calls=4)\n validation_dataset = validation_dataset.padded_batch(1, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n return training_dataset, validation_dataset", "def build_inference_graph(self):\n self.build_train_graph()", "def _train(self):\n self.train_acc.reset_states()\n self.val_acc.reset_states()\n 
self.train_loss.reset_states()\n self.val_loss.reset_states()\n\n self.train_ds.shuffle(buffer_size=1000)\n for idx, (x,y) in enumerate(self.train_ds):\n self.tf_train_step(x, y)\n\n for x,y in self.val_ds:\n self.tf_val_step(x, y)\n\n # It is important to return tf.Tensors as numpy objects.\n return {\n \"epoch\": self.iteration,\n \"loss_train\": self.train_loss.result().numpy(),\n \"loss_val\": self.val_loss.result().numpy(),\n \"acc_train\": self.train_acc.result().numpy(),\n \"acc_val\": self.val_acc.result().numpy(),\n }" ]
[ "0.6369666", "0.63546294", "0.63196236", "0.62988764", "0.6283459", "0.62768555", "0.6249582", "0.6194825", "0.6187834", "0.61582404", "0.6153339", "0.6152186", "0.6096578", "0.6096578", "0.6080581", "0.60757023", "0.6061717", "0.6044782", "0.60430914", "0.6011937", "0.5976109", "0.5970796", "0.5957939", "0.5935491", "0.59323853", "0.5929659", "0.59218836", "0.5921361", "0.59130055", "0.59090245", "0.5908526", "0.5903658", "0.5877689", "0.5874826", "0.58666784", "0.5863793", "0.58583176", "0.58490235", "0.584764", "0.5841336", "0.58390105", "0.5838528", "0.5837511", "0.58365226", "0.58224225", "0.5820164", "0.5806822", "0.5804681", "0.5798093", "0.5796851", "0.57897186", "0.5785294", "0.5784981", "0.5781549", "0.57813054", "0.5780863", "0.57777965", "0.5777055", "0.577091", "0.5767514", "0.5763553", "0.57624876", "0.57612413", "0.57565606", "0.5754833", "0.5749671", "0.5747781", "0.5745333", "0.5744892", "0.5739221", "0.5735242", "0.57308406", "0.5715559", "0.5714997", "0.5714215", "0.57058066", "0.57044786", "0.56965744", "0.5694339", "0.5689636", "0.5688659", "0.56860137", "0.5685403", "0.56840986", "0.56759197", "0.56735", "0.567324", "0.5670994", "0.5669918", "0.56620526", "0.5658426", "0.56492615", "0.5648734", "0.56481737", "0.5644434", "0.5642712", "0.5636706", "0.56309754", "0.5625974", "0.5619304" ]
0.66650474
0
Call the shell script that handles BLAST database formatting.
def format_blast(makeblastdb_path, fname):
    # The script is written in shell, so this function just calls it and
    # checks the output
    # Build the shell command
    cmd = ['bash', DBFORMAT_SCRIPT, makeblastdb_path, fname]
    # Execute the script
    # shell=False to ensure that we aren't executing commands from untrusted
    # sources
    p = subprocess.Popen(
        cmd,
        shell=False,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    out, err = p.communicate()
    return (out, err)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command_dbtool(self):\n dbtool.main(*self.args())", "def makeblastdb(files, db_name, db_type):\n with open(db_name + \".pin\", \"w\") as f:\n f.write(\"\\n\".join(db_name))\n return subprocess.run([\"makeblastdb\", \"-in\", db_name + \".pin\", \"-dbtype\", db_type)", "def blastn_commandline(cls):\n command = generate_path(\"../../blast/ncbi-blast*/bin/blastn\")\n fasta = generate_path(\"tmp/validate.fasta\")\n db = generate_path(\"data/blast/ValidationDB\")\n results = generate_path(\"tmp/validate.xml\")\n\n subprocess.call(\n '%s -query %s -db %s -outfmt 5 -out %s -best_hit_score_edge 0.05 '\n '-best_hit_overhang 0.1' % (\n command, fasta, db, results\n ), shell=True\n )", "def blastp(database, query, output_to_file = False, output_file = None,\n overwrite = False, outfmt = 7):\n if output_to_file:\n if os.path.exists(output_file) and not overwrite:\n return output_file\n cmd = 'blastp -db {} -query {} -outfmt {} -out {} -num_alignments 1'.\\\n format(database, query, outfmt, output_file)\n else:\n cmd = 'blastp -db {} -query {} -outfmt {} -num_alignments 1'.format(\n database, query, outfmt)\n\n printed_output = subprocess.check_output(cmd, shell=True)\n if output_to_file:\n return output_file\n return printed_output", "def blast_database(target, dbtype, output_to_file = False, output_file = None,\n overwrite = False):\n if output_to_file:\n if os.path.exists(output_file) and not overwrite:\n return output_file\n cmd = 'makeblastdb -in {} -dbtype {} -out {}'.format(target, dbtype, output_file)\n else:\n cmd = 'makeblastdb -in {} -dbtype {}'.format(target, dbtype)\n printed_output = subprocess.check_output(cmd, shell=True)\n\n if output_to_file:\n return output_file\n\n return printed_output", "def main():\n count = 0\n\n # Read in the required files and filenames.\n predicted_proteins, protein_db, output_file_aug_to_fasta, \\\n output_file_proteins_to_db, blastp_output, output_to_file, \\\n overwrite = call_files()\n\n # Write all entries in the AUGUSTUS output to a FASTA file\n for record in split_records_aug(predicted_proteins):\n if count == 0:\n mode = 'w'\n else:\n mode = 'a'\n write_fasta(record, output_file_aug_to_fasta, mode)\n count += 1\n\n # Create a blast database and carry out a blastp search\n blast_db = blast_database(protein_db, 'prot', True,\n output_file_proteins_to_db, overwrite)\n\n blastp_file = blastp(output_file_proteins_to_db, output_file_aug_to_fasta,\n True, blastp_output, overwrite, 7)\n\n # Parse the blastp results for the desired information\n blast_results = parse_blastp_output(blastp_output)\n\n # Print the results\n print_output(blast_results)", "def main():\n task_init(authorization_action='runbibformat',\n authorization_msg=\"BibReformat Task Submission\",\n description=\"\"\"\nBibReformat formats the records and saves the produced outputs for\nlater retrieval.\n\nBibReformat is usually run periodically via BibSched in order to (1)\nformat new records in the database and to (2) reformat records for\nwhich the meta data has been modified.\n\nBibReformat has to be run manually when (3) format config files have\nbeen modified, in order to see the changes in the web interface.\n\nAlthough it is not necessary to run BibReformat to display formatted\nrecords in the web interface, BibReformat allows to improve serving\nspeed by precreating the outputs. 
It is suggested to run\nBibReformat for 'HB' output.\n\nOption -m cannot be used at the same time as option -c.\nOption -c prevents from finding records in private collections.\n\nExamples:\n bibreformat Format all new or modified records (in HB).\n bibreformat -o HD Format all new or modified records in HD.\n bibreformat -o HD,HB Format all new or modified records in HD and HB.\n\n bibreformat -a Force reformatting all records (in HB).\n bibreformat -c 'Photos' Force reformatting all records in 'Photos' collection (in HB).\n bibreformat -c 'Photos' -o HD Force reformatting all records in 'Photos' collection in HD.\n\n bibreformat -i 15 Force reformatting record 15 (in HB).\n bibreformat -i 15:20 Force reformatting records 15 to 20 (in HB).\n bibreformat -i 15,16,17 Force reformatting records 15, 16 and 17 (in HB).\n\n bibreformat -n Show how many records are to be (re)formatted.\n bibreformat -n -c 'Articles' Show how many records are to be (re)formatted in 'Articles' collection.\n\n bibreformat -oHB -s1h Format all new and modified records every hour, in HB.\n\"\"\", help_specific_usage=\"\"\" -o, --formats \\t Specify output format/s (default HB)\n -n, --noprocess \\t Count records to be formatted (no processing done)\nReformatting options:\n -a, --all \\t Force reformatting all records\n -c, --collection \\t Force reformatting records by collection\n -f, --field \\t Force reformatting records by field\n -p, --pattern \\t Force reformatting records by pattern\n -i, --id \\t Force reformatting records by record id(s)\nPattern options:\n -m, --matching \\t Specify if pattern is exact (e), regular expression (r),\n \\t partial (p), any of the words (o) or all of the words (a)\n\"\"\",\n version=__revision__,\n specific_params=(\"ac:f:p:lo:nm:i:\",\n [\"all\",\n \"collection=\",\n \"matching=\",\n \"field=\",\n \"pattern=\",\n \"format=\",\n \"noprocess\",\n \"id=\"]),\n task_submit_check_options_fnc=task_submit_check_options,\n task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,\n task_run_fnc=task_run_core)", "def main():\n cur, conn = connect('dwh.cfg')\n \n set_schema = schema_queries[1]\n cur.execute(set_schema)\n \n print('Loading Staging Tables.')\n load_staging_tables(cur, conn)\n \n print('Inserting Rows.')\n insert_tables(cur, conn)\n\n \n conn.close()", "def build_db(db_fasta, out_db, input_type='fasta'):\n subprocess.run(\n 'makeblastdb -dbtype nucl -in %s -input_type %s -parse_seqids -out %s'\n % (db_fasta, input_type, out_db),\n shell=True,\n env={'PATH': BLAST_PATH}\n )", "def make_user_database():\n createblast_out, createblast_error = Popen([\"makeblastdb\", \"-in\", args.blast_database, \"-dbtype\", \"nucl\"], stdout=PIPE, stderr=PIPE).communicate()\n admin_log(createblast_out, createblast_error, \"create database:\")", "def db_shell(ctx, db_key=None):\n ctx.run('pgcli -h {db_host} -d {db_name} -U {db_user}'.format(**get_database_settings(db_key)), pty=True)", "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['STAGE'].values()))\n cur = conn.cursor()\n \n #remove the existing tables\n drop_tables(cur, conn)\n \n #replace the tables with new ones\n create_tables(cur, conn)\n \n #add missing postcode value into table\n default_missing_values(cur, conn)\n \n conn.close()", "def _create_execute_blastdbcmd(execute_command):\n\n def execute_blastdbcmd(input_file: str, sequence_file: str, database: str):\n cmd = \"{} -db {} 
-entry_batch {} > {}\".format(\n BLASTDBCMD_CMD, database, input_file, sequence_file)\n execute_command(cmd)\n\n return execute_blastdbcmd", "def formatdb(fastadata={},fname=\"\"):\n if not fastadata and fname:\n OSsystem(\"%s -i %s\" % (FORMATDB_PATH,fname))\n elif fastadata and fname:\n pass\n else:\n raise \"inproper input\"\n return fname", "def format():\n isort = 'isort -rc *.py app/'\n yapf = 'yapf -r -i *.py app/'\n\n print('Running {}'.format(isort))\n subprocess.call(isort, shell=True)\n\n print('Running {}'.format(yapf))\n subprocess.call(yapf, shell=True)", "def run_blast(inputfile, input_type, outputfile, database, args=None, verbose=True):\n\n assert (input_type in ['protein', 'dna']), \"Input type must be either 'protein' or 'dna'\"\n\n cmd = ['diamond']\n\n if input_type == 'protein':\n cmd += ['blastp']\n elif input_type == 'dna':\n cmd += ['blastx']\n\n cmd += ['-d', database]\n cmd += ['-q', inputfile]\n cmd += ['-o', outputfile]\n\n if not args:\n args = \"--more-sensitive --top 10 --quiet\"\n\n cmd += args.split()\n\n if verbose:\n print(' '.join(cmd))\n\n with open(os.devnull, 'w') as devnull:\n try:\n exit_code = call(cmd, stdout=devnull)\n except OSError:\n exit_code = None\n\n return exit_code", "def populate_db_command():\n print(\"Populating DB with sample data.\")\n populate_db()\n print \"Done\"", "def format_bash(self,query_results):\n data=query_results.data\n \n name=\"ddb\"\n\n print (\"{0}_row_length={1}\".format(name,len(data)))\n print (\"{0}_column_length={1}\".format(name,len(query_results.columns)))\n print (\"\")\n\n column_index=0\n for column in query_results.columns:\n print(\"{0}_columns['{1}']='{2}'\".format(name,column_index,column))\n column_index+=1\n\n\n row_index=0\n for row in data:\n for column_index in range(0,len(query_results.columns)):\n print('{0}_data[{1}][{2}]=\"{3}\"'.format(name,row_index,column_index,row['data'][column_index]))\n row_index+=1\n # TODO return output for this\n return \"\"", "def main():\r\n db = connect_database()\r\n with db:\r\n if sys.argv[1] == \"-s\":\r\n select_all(db, sys.argv[2])\r\n elif sys.argv[1] == \"-i\":\r\n cus_data = []\r\n for i in range(2, len(sys.argv)):\r\n cus_data.append(sys.argv[i])\r\n insert_customer(db, cus_data)\r\n elif sys.argv[1] == \"-c\":\r\n create_tables()\r\n elif sys.argv[1] == \"-pw\":\r\n pop_waiting(db, sys.argv[2])\r\n elif sys.argv[1] == \"-ph\":\r\n pop_help(db, sys.argv[2])\r\n elif sys.argv[1] == \"-r\":\r\n refresh_tables(db)\r\n elif sys.argv[1] == \"-e\":\r\n export_helped_table(db)\r\n else:\r\n print errorArgument\r\n db.close()", "def init_db_command():\n init_db()\n # click.command() defines a command line command called init-db that calls the init_db function and shows a success message to the user. 
\n click.echo('Initialized the database.')", "def createdb(dbname):\n os.system(\"createdb -w %s\" % dbname)", "def run_db(args):\n # print(\"running chronqc_db\")\n chronqc_db.main(args)", "def create_blast_db(self):\n print(\"Creating blast db\")\n if self.mask:\n command = 'dustmasker -in ' + self.seq_file + ' -infmt fasta '\n command += '-outfmt maskinfo_asn1_bin -out ' + self.seq_file + '_dust.asnb'\n subprocess.check_output(command, shell=True) # identifying low-complexity regions.\n\n command = 'makeblastdb -in ' + self.seq_file + ' -input_type fasta -dbtype nucl '\n command += '-mask_data ' + self.seq_file + '_dust.asnb '\n command += '-out ' + self.seq_file + ' -title \"Whole Genome without low-complexity regions\"'\n subprocess.check_output(command, shell=True) # Overwriting the genome file.\n else:\n command = 'makeblastdb -in ' + self.seq_file + ' -input_type fasta -dbtype nucl '\n command += '-out ' + self.seq_file + ' -title \"Whole Genome unmasked\"'\n subprocess.check_output(command, shell=True)", "def initdb_command():\n init_db()", "def initdb_command():\n init_db()", "def structure_and_repopulate_db() -> None:\n with open('db.sql', encoding=\"utf-8\") as f:\n commands = f.read().strip().split(';')\n commands = [command.strip() for command in commands]\n for command in commands:\n my_cursor.execute(command)\n my_db.commit()\n print('Source structure created, data repopulated')", "def initdb_command():\r\n init_db()\r\n print('Initialized the database.')", "def initdb_command():\n init_db()\n print('Initialized the database.')", "def initdb_command():\n init_db()\n print('Initialized the database.')", "def initdb_command():\n init_db()\n print('Initialized the database.')", "def initdb_command():\n init_db()\n print('Initialized the database.')", "def initdb_command():\n init_db()\n print('Initialized the database.')", "def main(args):\n\n\t# Setting logging format and default level\n\tlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)\n\n\tmsg = [\n\t\t'Connection created.',\n\t\t'Connection closed.',\n\t\t'Cursor created.',\n\t\t'Cursor closed.',\n\t\t'Changes committed.',\n\t\t'Statement executed:',\n\t\t'Transaction rollback:'\n\t]\n\n\t# Process args\n\targs = parse_args(args)\n\tscript_path = args.path\n\tconfig_path = args.config\n\n\t# Read config file\n\tcfg = read_config(config_path)\n\n\ttry:\n\t\tcnx = connect_to_db(cfg['mysql'])\n\t\tlogging.info(msg[0])\n\n\t\t# Create cursor\n\t\tcursor = cnx.cursor()\n\t\tlogging.info(msg[2])\n\n\t\t# Open and read the file as a single buffer\n\t\tfd = open(script_path, 'r')\n\t\tsql_script = fd.read()\n\t\tfd.close()\n\n\t\t# Get all SQL statements (split on ';')\n\t\tsql_statements = sql_script.split(';')\n\n\t\t# Execute SQL statements\n\t\tfor statement in sql_statements:\n\t\t\tif not statement.strip():\n\t\t\t\tcontinue\n\t\t\tcursor.execute(statement)\n\t\t\tlogging.info(msg[5] + ' %s' % str(statement) + '\\n')\n\n\t\t# Commit changes\n\t\tcnx.commit()\n\t\tlogging.info(msg[4])\n\n\texcept MySQLdb.Error as err:\n\t\t# Note: catching client.OperationalError as error reports back skipped statements\n\t\t# such as CREATE TABLE IF NOT EXISTS [table_name] as errors even though existing\n\t\t# table in fact exists.\n\n\t\tcnx.rollback()\n\t\tlogging.warning(msg[6] + ' %s' % str(err) + '\\n')\n\n\tfinally:\n\t\tcursor.close()\n\t\tlogging.info(msg[3])\n\t\tcnx.close()\n\t\tlogging.info(msg[1])", "def _blast(query, output_pssm, output, blastdb):\n psiblast_command = \"psiblast -db {:} -query {:} 
-out_ascii_pssm {:} \" + \\\n \"-save_pssm_after_last_round -out {:}\"\n log_out = \"{}.out\".format(output)\n log_err = \"{}.err\".format(output)\n with open(log_out, 'a') as f_out:\n with open(log_err, 'a') as f_err:\n command = psiblast_command.format(\n blastdb, query, output_pssm, output)\n f_out.write('=================== CALL ===================\\n')\n f_out.write(command + '\\n')\n subprocess.check_call(\n command, shell=True, stderr=f_err, stdout=f_out)\n f_out.write('================= END CALL =================\\n')", "def initdb_cmd():\n init_db()\n print(\"database initialized\")", "def command_gtf2db(raw_args, prog=None):\n\n if prog:\n parser = argparse.ArgumentParser(prog=prog, add_help=False)\n else:\n parser = argparse.ArgumentParser(add_help=False)\n\n def print_message(message):\n if message:\n sys.stderr.write(message)\n else:\n sys.stderr.write(command_gtf2db.__doc__)\n sys.stderr.write('\\n')\n sys.exit(1)\n\n parser.error = print_message\n\n # required\n parser.add_argument(\"-i\", \"--input\", dest=\"input\", metavar=\"GTF_file\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output\", metavar=\"DB_file\")\n\n # debugging and help\n parser.add_argument(\"-h\", \"--help\", dest=\"help\", action='store_true')\n parser.add_argument(\"-d\", \"--debug\", dest=\"debug\", action=\"count\", default=0)\n\n args = parser.parse_args(raw_args)\n\n g2g.configure_logging(args.debug)\n\n if args.help:\n g2g.exit(\"\", parser)\n\n if not args.input:\n g2g.exit(\"No GTF file was specified.\", parser)\n\n if not args.output:\n g2g.exit(\"No output GTG DB file was specified.\", parser)\n\n try:\n gtf_db.gtf2db(args.input, args.output)\n except KeyboardInterrupt as ki:\n LOG.debug(ki)\n except exceptions.G2GValueError as e:\n g2g.exit(e, parser)\n except exceptions.G2GError as e:\n g2g.exit(e, parser)", "def main(argv, out=print):\n opts = parser.parse_args(argv[1:])\n out(generate_sql(vars(opts)))", "def stampdb(self, args):\n revision = REVISION_MAPPING[args.configversion]\n print(f\"Based on config version {args.configversion} \"\n f\"we think your results schema is version {revision} and are upgrading to it\")\n stamp_db(revision, args.dbfile)", "def do_command(self, args):\n chk_arg_count(args, 0)\n dbops.init_database()", "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(config['CLUSTER']['HOST'], config['CLUSTER']['DB_NAME'], config['CLUSTER']['DB_USER'], config['CLUSTER']['DB_PASSWORD'], config['CLUSTER']['DB_PORT']))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()", "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()", "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()", "def test_blast_genome(self):\r\n\r\n formatdb_cmd = 'formatdb -p F -o T -i %s' % self.subjectdb_fp\r\n system(formatdb_cmd)\r\n self._paths_to_clean_up.append(\"formatdb.log\")\r\n for suffix in [\"nhr\", \"nin\", \"nsd\", \"nsi\", \"nsq\"]:\r\n 
self._paths_to_clean_up.append(\".\".join(\r\n [self.subjectdb_fp, suffix]))\r\n\r\n raw_output = blast_genome(TEST_BLAST_DB_LINES, self.subjectdb_fp,\r\n e_value=1e-4, max_hits=100, word_size=28,\r\n working_dir=\"./\", blast_mat_root=None)\r\n\r\n i = 0\r\n for line in raw_output:\r\n\r\n if line.startswith(\"#\"):\r\n i += 1\r\n continue # comments depend on tmpfilename, BLAST version\r\n self.assertEqual(raw_output[i], EXP_BLAST_OUTPUT[i])\r\n i += 1", "def bids_cli():", "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} \\\n port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()", "def main():\n \n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()", "def init_db_command():\n init_db()\n click.echo(\"Initialized the database.\")", "def init_db_command():\n init_db()\n click.echo(\"Initialized the database.\")", "def gtfsdb_main(ctx, database):\n ctx.obj = dict()\n if not database and os.path.exists(DEFAULT_CONFIG_FILE):\n conf = json.load(open(DEFAULT_CONFIG_FILE, 'r'))\n database = conf['database']\n ctx.obj.update(dict(conf=conf))\n else:\n click.echo(\"No database selected!!\")\n sys.exit(1)\n ctx.obj.update(dict(database=Database(url=database), db_url=database))", "def main(args): \n if args.type == 'FILEGDB':\n create_filegdb(args.name, args.path)\n elif args.type == 'ST_GEOMETRY' or args.type == 'SPATIALITE':\n create_sqlitedb(args.name, args.type, args.path)", "def main():\n parser = argparse.ArgumentParser(\n description='Convert Thunderbird address ldif to your LDAP ldif,'\n ' or the reverse.')\n parser.add_argument('-b',\n metavar='BASE_PATH',\n dest='base_path',\n default='',\n help='ldap base path')\n parser.add_argument('-f',\n metavar='FILE',\n dest='fname',\n type=argparse.FileType(),\n required=True,\n help='ldif file')\n\n args = parser.parse_args()\n convert(args.fname, args.base_path)", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def main():\n\n parser = argparse.ArgumentParser(\n description='Perform initial loading of build database from manifests'\n )\n parser.add_argument('-d', '--debug', action='store_true',\n help='Enable debugging output')\n parser.add_argument('-c', '--config', dest='db_repo_config',\n help='Configuration file for build database loader',\n default='build_db_loader_conf.ini')\n\n args = parser.parse_args()\n\n # Initialize logging\n logging.basicConfig(\n stream=sys.stderr,\n format='%(asctime)s: %(levelname)s: %(message)s',\n level=logging.DEBUG if args.debug else logging.INFO\n )\n\n # Check configuration file information\n db_repo_config = 
configparser.ConfigParser()\n db_repo_config.read(args.db_repo_config)\n\n if any(key not in db_repo_config\n for key in ['build_db', 'repos', 'email']):\n logging.error(\n f'Invalid or unable to read config file {args.db_repo_config}'\n )\n sys.exit(1)\n\n db_info = db_repo_config['build_db']\n db_required_keys = ['db_uri', 'username', 'password']\n\n if any(key not in db_info for key in db_required_keys):\n logging.error(\n f'One of the following DB keys is missing in the config file:\\n'\n f' {\", \".join(db_required_keys)}'\n )\n sys.exit(1)\n\n repo_info = db_repo_config['repos']\n repo_required_keys = ['manifest_dir', 'manifest_url', 'repo_basedir']\n\n if any(key not in repo_info for key in repo_required_keys):\n logging.error(\n f'One of the following repo keys is missing in the '\n f'config file:\\n {\", \".join(repo_required_keys)}'\n )\n sys.exit(1)\n\n email_required_keys = ['smtp_server', 'receivers']\n email_info = db_repo_config['email']\n\n if any(key not in email_info for key in email_required_keys):\n logging.error(\n f'One of the following email keys is missing in the config '\n f'file:\\n {\", \".join(email_required_keys)}'\n )\n sys.exit(1)\n\n # Setup loader, read in latest manifest processed, get build manifest\n # information, checkout/update build manifest repo and walk it,\n # generating or update the project documents, then generating or\n # updating the build documents, then the new commits for the build,\n # and then linking the build and commit entries to each other as needed,\n # finishing with updating the last manifest document (needed to do\n # incremental updates or restart an interrupted loading run)\n build_db_loader = BuildDBLoader(db_info, repo_info, email_info)\n last_manifest = build_db_loader.get_last_manifest()\n manifest_repo = repo_info['manifest_dir']\n\n logging.info('Checking out/updating the build-manifests repo...')\n cbutil_git.checkout_repo(manifest_repo, repo_info['manifest_url'])\n\n logging.info(f'Creating manifest walker and walking it...')\n if last_manifest:\n logging.info(f' starting after commit {last_manifest[0]}...')\n\n manifest_walker = cbutil_git.ManifestWalker(manifest_repo, last_manifest)\n\n for commit_info, manifest_xml in manifest_walker.walk():\n try:\n manifest_info = build_db_loader.get_manifest_info(manifest_xml)\n except mf_parse.InvalidManifest as exc:\n # If the file is not an XML file, simply move to next one\n logging.info(f'{commit_info[0]}: {exc}, skipping...')\n continue\n\n build_db_loader.update_project_documents(manifest_info)\n\n build_data = build_db_loader.generate_build_document(commit_info,\n manifest_info)\n\n build_db_loader.generate_commit_documents(build_data, manifest_info)\n\n if not build_db_loader.first_prod_ver_build:\n build_db_loader.update_build_commit_documents(build_data)\n\n logging.debug('Updating last manifest document...')\n build_db_loader.update_last_manifest(build_data['manifest_sha'])", "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n print('config file read! - now connecting...')\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n\n print('connection established! - now dropping tables...')\n drop_tables(cur, conn)\n\n print('tables dropped! - now creating tables...')\n create_tables(cur, conn)\n\n print('tables are now created! 
- closing connection...')\n conn.close()\n\n print('done!')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def main():\n\n # open a connection to Cloud SQL\n conn_str = (\n f\"DRIVER={{{ODBC_DRIVER}}};SERVER={PROXY_ADDR};UID={DB_USER};PWD={DB_PASS}\"\n )\n with pyodbc.connect(conn_str, autocommit=True).cursor() as cursor:\n\n # create the database\n database_create(cursor=cursor, database=DB_NAME, drop=True)\n\n # create the tables\n sql_script = f\"\"\"USE {DB_NAME};\nCREATE TABLE jobdef (jobtype CHAR(20) PRIMARY KEY, start_page VARCHAR(120),\n single_domain BIT, subpath VARCHAR(120), max_pages INT,\n daily BIT);\nCREATE TABLE jobhist (job_id INT IDENTITY(1,1) PRIMARY KEY, jobtype CHAR(20),\n queued DATETIME2, jobstart DATETIME2, jobend DATETIME2,\n elapsed INT, links INT, pages INT, missing INT);\nCREATE TABLE crawled (job_id INT, page_url VARCHAR(120), crawled DATETIME2);\nCREATE TABLE notfound (job_id INT, found DATETIME2, source VARCHAR(120),\n target VARCHAR(120), link_text VARCHAR(120));\"\"\"\n cursor.execute(sql_script)\n cursor.commit()\n\n # load sample data from CSV files into each of the tables\n for table in [\"jobdef\", \"jobhist\", \"crawled\", \"notfound\"]:\n csv_insert(\n cursor=cursor,\n database=DB_NAME,\n table=table,\n filename=f\"initdata\\\\{table}.csv\",\n )\n table_print(cursor=cursor, table=table)", "def init_db_command():\n click.echo(\"Initializing the database.\")\n init_db()\n click.echo(\"Initialized the database.\")", "def main() -> None:\n\n # TODO - Gather this from whatever mechanism is needed\n data = {\n \"family_members\": \"\",\n \"social_status\": \"\",\n \"gender\": \"\",\n \"date_birth\": \"\",\n \"id_number\": 0,\n \"mail\": \"\",\n \"name\": \"\",\n \"id\": \"\",\n }\n\n # Grab an active cursor object\n cursor = db_connection.cursor()\n\n # We know the DB is the part that can break, so we'll wrap this first\n try:\n update(cursor, data)\n print(\"Yay this worked\") # Check the DB and give some user feedback\n except pymysql.DatabaseError as e:\n print(str(e)) # Prints the basic error you encountered\n # If you hit this section, get rid of the try/except or use pdb to walk the error\n # There are multiple ways to print the traceback\n # IMHO the easiest is to just not catch the exception\n\n # This should only really be terminated when the application is done\n db_connection.close()", "def _build_db_form_script(cur, db_name):\n building_script = os.path.abspath(os.path.join(gconf.DB_builder.DB_DIR, gconf.DB_builder.SQL_CONSTRUCTION))\n logger.info(\"`{}` database was not found. 
Running database building script: {}\".format(db_name.title(),\n building_script))\n for statement in parse_sql(filename=building_script):\n cur.execute(statement)", "def init_db_command():\r\n init_db()\r\n click.echo('Initialized the database.')", "def main():\n # Declaration\n cnt_errors = 0\n\n # Get the script path, all DBF files inside this path will be converted into CSV.\n script_path = os.path.dirname(__file__)\n\n # Clear the console screen\n clear()\n\n # Script is starting to find all DBF files.\n print('Script is searching for DBF files.')\n\n # Search for DBF files inside the script path.\n for dirpath, dirname, filenames in os.walk(script_path):\n for filename in filenames:\n if filename.endswith(\".dbf\"):\n print(\"Convert: {filename} to .csv\".format(filename=filename))\n\n # Combine both strings\n full_path = dirpath + \"\\\\\" + filename\n\n # Try to load the DBF file\n try:\n table = dbfread.DBF(full_path, encoding=\"windows-1252\", ignore_missing_memofile=False)\n except dbfread.exceptions.DBFNotFound as dbf_exc:\n print(\"Error occurred: \\n{file} \\n{error}\".format(file=filename, error=dbf_exc))\n cnt_errors += 1\n continue\n\n # Load data from table into an DataFrame.\n df = pd.DataFrame(iter(table))\n\n # Remove last four characters.\n csv_file = filename[:-4] + \".csv\"\n\n # Join the script path.\n output_path_csv = os.path.join(script_path, csv_file)\n\n # Print a message and create the csv file.\n print(\"Convert: {filename} to .csv\".format(filename=filename))\n df.to_csv(output_path_csv, sep=';')\n\n # Print out amount of not converted DBF files.\n if cnt_errors > 0:\n print('Amount of not converted files: {}'.format(cnt_errors))", "def main():\n\n parser = argparse.ArgumentParser(description='Query Chado.')\n parser.add_argument('-s', '--pgserver', help='Postgres server', required=True)\n parser.add_argument('-d', '--database', help='Reporting database', required=True)\n parser.add_argument('-u', '--username', help='Postgres User name', required=True)\n parser.add_argument('-p', '--password', help='Postgres password', required=True)\n parser.add_argument('-r', '--release', help='FlyBase release used', required=True)\n\n args = parser.parse_args() \n server = args.pgserver\n database = args.database\n username = args.username\n password = args.password\n release = args.release\n \n # Define connection\n conn_string = \"host='%s' dbname='%s' user='%s' password='%s'\" % (server, database, username, password)\n filename = \"physical_interactions_mitab_%s_reporting.tsv\" % (release)\n\n # Attempt to get a connection\n conn = psycopg2.connect(conn_string)\n query_for_ints(filename,conn)\n\n # Close the connection\n conn.close()", "def main():\n\n # Has oceanview done something? 
If this is still false by the end,\n # Display the Usage information.\n did_something = False\n\n # The user wants to clear the database.\n if 'cleardb' in sys.argv:\n did_something = True\n print(\"It's sqlite, just delete the file.\")\n\n # The user wants the test data added to the database.\n if 'maketestdb' in sys.argv:\n did_something = True\n database = data.Database(\"db.sqlite\", \"database/build_db.sql\")\n dbutil.add_test_data(database)\n\n # The user wants the front end launched\n if 'front' in sys.argv or 'both' in sys.argv:\n did_something = True\n frontend = front.init()\n frontend.run(INTERFACE, 8000)\n\n # The user wants the back end launched.\n if 'back' in sys.argv or 'both' in sys.argv:\n did_something = True\n backend = back.init()\n backend.run(INTERFACE, 80)\n\n # did_something is False, nothing was done, show the usage info.\n if did_something is False:\n print(\"Usage: python oceanview.py [command]\")\n print(\"COMMANDS:\")\n print(\" front - start the frontend\")\n print(\" back - start the backend\")\n print(\" both - start both\")\n print(\" maketestdb - add test data to the database\")", "def init_db_command():\n db_init()\n click.echo('Initialized the database.')", "def main():\n if len(sys.argv) == 4:\n\n messages_path, categories_path, database_path = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_path, categories_path))\n df = load_data(messages_path, categories_path)\n\n print('Cleaning data...')\n df = clean_data(df)\n\n print('Saving data...\\n DATABASE: {}'.format(database_path))\n save_data(df, database_path)\n\n print('Cleaned data saved to database!')\n\n else:\n print('Please provide the filepaths of the messages and categories '\n 'datasets as the first and second argument respectively, as '\n 'well as the filepath of the database to save the cleaned data '\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\n 'disaster_messages.csv disaster_categories.csv '\n 'DisasterResponse.db')", "def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-p\", dest=\"pdbfile\", help=\"pdbfile\")\n\tparser.add_option(\"-s\", dest=\"statefile\", help=\"statefile\")\n\tparser.add_option(\"-o\", dest=\"outname\", help=\"outname\")\n\tparser.add_option(\"-l\", dest=\"ligcutoff\", help=\"gridlig cutoff\", default=2.5)\n\tparser.add_option(\"-b\", dest=\"bbcutoff\", help=\"gridbb cutoff\", default=2.0)\n\tparser.set_description(main.__doc__)\n\t(options, args) = parser.parse_args()\n\n\tif not options.pdbfile or not options.statefile or not options.outname:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\t# get output filename\n\tcols = options.outname.split(\".\")\n\toutgridlig = cols[0] + \".gridlig\"\n\toutgridbb = cols[0] + \".gridbb\"\n\n\t# get backbone from protein\n\tprotein = Molecule()\n\tprotein.readPDB(options.pdbfile)\n\t\n\tsele = Selection()\n\tsele.makeSelection(\"BB\")\n\tbb = sele.apply_selection(protein).atomList()\n\n\t# read in previous statefile information\n\ttry:\n\t\tSTATEFILE = open(options.statefile)\n\texcept:\n\t\tprint \"unable to open statefile\"\n\t\tsys.exit()\n\n\tgridlig_file = \"\"\n\tgridbb_file = \"\"\n\tfor line in STATEFILE.readlines():\n\t\tcols = line.split()\n\t\tif cols[0] == \"gridlig:\":\n\t\t\tgridlig_file = cols[1]\n\t\tif cols[0] == \"gridbb:\":\n\t\t\tgridbb_file = cols[1]\n\n\tgridlig = grid()\n\tgridbb = grid()\n\n\tgridlig.read(gridlig_file)\n\tgridbb.read(gridbb_file)\n\n\tgridlig.setFullOccupied()\n\tgridbb.setFullOccupied()\n\n\tligcutoff = float(options.ligcutoff)\n\tbbcutoff = float(options.bbcutoff)\n\tgridTrimInclude(gridbb, bb, bbcutoff)\n\tgridTrimExclude(gridlig, bb, ligcutoff)\n\n\tgridlig.write(outgridlig)\n\tgridbb.write(outgridbb)", "def main(arguments):\n migration = Migration(arguments)\n return migration.run()", "def main():\n db = _db.Database(experiment.ORACLE_PATH)\n db.populate_kernel_names_table()\n db.commit()", "def shell():\n parser = argparse.ArgumentParser(\n \n description='pyrpipe diagnostic utility\\nGenerate shell script.',\n \n usage='''pyrpipe_diagnostic report [<args>] <logfile>\n \n ''') \n parser.add_argument('-o', help='out file \\ndefault: same as input logfile',action=\"store\")\n parser.add_argument('-c',help='Dump command options [(a)ll,fa(i)l,(p)ass]\\ndefault: a',default='a',action=\"store\")\n parser.add_argument('-v',help='verbose',action=\"store_true\")\n parser.add_argument('-f',help='Filter by programs. 
Provide a comma-separated list e.g., prefetch,STAR,bowtie2 \\ndefault None')\n parser.add_argument('logfile', help='The log file generated by pyrpipe',action=\"store\")\n args = parser.parse_args(sys.argv[2:])\n \n logFile=args.logfile \n #parse args\n vFlag=args.v\n if vFlag:\n print(\"Generating report\")\n outFile=\"\"\n if args.o is None:\n outFile=pu.get_file_basename(logFile)\n else:\n outFile=args.o\n outFile+='.sh'\n \n filters=[]\n if args.f is not None:\n filters= args.f.split(',')\n \n reports.generateBashScript(logFile,outFile,filters,args.c)", "def command(self, args):\n try:\n with Reader(args.filename, args.sql_command) as odb_reader:\n for row in odb_reader:\n print(row)\n except InterfaceError as err:\n print(f\"Query interface error: {err}\")\n except ProgrammingError as err:\n if \"Assertion failed\" in str(err):\n print(f\"Query error: {args.filename} does not appear to be a valid ODB2 file.\")\n else:\n print(f\"Query error: {err}\")", "def exec_blast(infile, config_file, out_name):\n\tdb, evalue = parse_config(config_file, \"blast\")\n\tfasta_string = SeqIO.read(infile, format=\"fasta\")\n\tresult_handle = NCBIWWW.qblast(\"blastp\", \"nr\", fasta_string.seq)\n\toutput= out_name + \".xml\"\n\tsave_file = open(output, \"w\")\n\tsave_file.write(result_handle.read())\n\tsave_file.close()\n\tresult_handle.close()\n\treturn (output)", "def _build_command(tables, fixtures_path, fixture_name):\n command = \"python manage.py dumpdata{0} --indent=4 > {1}/{2}\".format(\n tables, fixtures_path, fixture_name\n )\n return command", "def strudump(self, args):\n if not self.stru:\n print(\"missing CroStru file\")\n return\n self.dump_db_table_defs(args)", "def main():\n msg = \"\"\"\n ----------------------------------------------------- \\n\n Running this script will delete the target database! \\n\n And it will close connections on the target database. \\n\n Are you sure you wish to continue? 
(y/n) \\n\n ----------------------------------------------------- \\n\n \\n\"\"\"\n\n if input(msg).lower() != \"y\":\n sys.exit()\n\n # create the logfile\n oracle2postgres.create_logfile()\n\n # get settings for migration\n migration_config = oracle2postgres.get_migration_config()\n source_config = oracle2postgres.get_source_config()\n target_config = oracle2postgres.get_target_config()\n\n # check the schema exist on the source database\n source_engine = oracle2postgres.connect_to_source(source_config)\n oracle2postgres.check_schema_exist(source_engine,source_config['schema_list'])\n\n # check and remove null characters in strings\n oracle2postgres.check_for_nulls(source_engine,source_config['schema_list'],remove=True)\n\n # create a new database on the target\n # WARNING: deletes target database before creation!\n target_engine = oracle2postgres.connect_to_target(target_config)\n oracle2postgres.drop_connections(target_config['database'],target_engine)\n oracle2postgres.drop_database(target_config['database'],target_engine)\n oracle2postgres.create_database(target_config['database'],target_engine)\n\n # create the schema on the target database\n target_engine = oracle2postgres.connect_to_target(target_config,target_config['database'])\n oracle2postgres.create_target_schema(source_config['schema_list'],source_engine,target_engine)\n\n # run the migration\n oracle2postgres.migrate(source_config,target_config,migration_config)", "def backup_database():\n logger.info(\"start database_backup\")\n management.call_command('dbbackup', compress=True)\n logger.info(\"end database_backup\")", "def run(output, path):\n\n # Derive path to dbfile\n dbfile = os.path.join(path, \"articles.sqlite\")\n\n # Stream text from database to file\n Export.stream(dbfile, output)", "def run(self):\n command = (\"shp2pgsql -I -s 4326 -d {} {}.{}|psql\").format(self.shpname(),\n self.schema,\n self.tablename())\n\n self.pgw.shell(command)", "def init_db():\n # with current_app.open_resource(\"schema.sql\") as f:\n # db.executescript(f.read().decode(\"utf8\"))\n print(\"初始化数据库脚本文件!!!\")", "def db(filename = 'P51-11'):\n import pdb\n sys.argv[1:] = ['-v', filename]\n pdb.run('extract.main()')", "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n try: \n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n # copy data from S3 to staging tables in redshift\n print('Staging song and log data. This takes several minutes!')\n execute_query_list(cur, conn, copy_table_queries)\n \n print('Populating analytics tables.')\n # fill analytical tables from staging tables\n execute_query_list(cur, conn, insert_table_queries)\n finally:\n conn.close()", "def main():\n\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')", "def format_excel():\n subprocess.call([\"python\", format_excel_path])", "def main():\r\n if len(sys.argv) == 2:\r\n if sys.argv[1] == 'branch_name':\r\n print branch_name()\r\n elif sys.argv[1] == 'plat_id':\r\n print plat_id()\r\n else:\r\n print plat_id()\r\n print branch_name()\r\n return", "def main() -> None:\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print(\n \"Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}\".format(\n messages_filepath, categories_filepath\n )\n )\n df = load_data(messages_filepath, categories_filepath)\n\n print(\"Cleaning data...\")\n df = clean_data(df)\n\n print(\"Saving data...\\n DATABASE: {}\".format(database_filepath))\n save_data(df, database_filepath)\n\n print(\"Cleaned data saved to database!\")\n\n else:\n print(\n \"Please provide the filepaths of the messages and categories \"\n \"datasets as the first and second argument respectively, as \"\n \"well as the filepath of the database to save the cleaned data \"\n \"to as the third argument. \\n\\nExample: python process_data.py \"\n \"disaster_messages.csv disaster_categories.csv \"\n \"DisasterResponse.db\"\n )", "def main():\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--service', default=os.path.join(os.getenv(\"HOME\"), \".desservices.ini\"))\n parser.add_argument('--section', '-s', default='db-desoper',\n help='section in the .desservices file w/ DB connection information')\n parser.add_argument('--debug', '-d', help='print debug info', default=False, action='store_true')\n parser.add_argument('--header', help='print header derived from query', default=False, action='store_true')\n parser.add_argument('--format', '-f', help='format = csv or pretty', default=\"pretty\", choices=[\"pretty\", \"csv\"])\n parser.add_argument('--delimiter', help='delimiter for csv', default=\",\")\n parser.add_argument('--log', help='log queries to file (via append)', default=None)\n parser.add_argument('query', help='query to execute (or -, read from standard in)', default=None)\n\n args = parser.parse_args()\n if not args.log:\n args.log = os.getenv(\"DESPYDB_QUERY_LOG\") # Undefined -> returns None\n query(args)", "def und_create_db(udb_file, project_languages=\"c++\"):\n subprocess.call(f\"und create -db {udb_file} -languages {project_languages}\", shell=True)", "def main():\n args = prepper()\n if args.reset:\n shutil.rmtree(Path.home() / \".bandaid\")\n exit('Directory wiped and data wiped.')\n dbpath = checkFirstRun()\n if args.config:\n printConfig(dbpath)\n printlogo()\n if args.fetcher and args.bandname:\n fetchCurrentStatus(args.bandname, dbpath)\n if args.fetcher and not args.bandname:\n fetchCurrentStatus('foo', dbpath)\n if args.bandname:\n getBand(\" \".join(args.bandname), dbpath)\n else:\n exit('Must set band name -h for help.')", "def makeblastdb(fasta, program='blastn', returncmd=False, **kwargs):\n # Convert the options dictionary to a string\n options = kwargs_to_string(kwargs)\n # Set the dbtype appropriately\n if program == 'blastn' or program == 'tblastn' or program == 'tblastx':\n dbtype = 'nucl'\n else:\n dbtype = 'prot'\n # Remove the file extension from the file name\n output = os.path.splitext(fasta)[0]\n cmd = 'makeblastdb -in {fasta} -parse_seqids -max_file_sz 2GB -dbtype {dbtype} -out {output}{options}' \\\n 
.format(fasta=fasta,\n dbtype=dbtype,\n output=output,\n options=options)\n # Check if database already exists\n if not os.path.isfile('{output}.nhr'.format(output=output)):\n out, err = run_subprocess(cmd)\n else:\n out = str()\n err = str()\n if returncmd:\n return out, err, cmd\n else:\n return out, err", "def main():\n\n parser = init_parser()\n args = parser.parse_args()\n\n # Set up logging.\n level = logging.INFO\n if args.debug:\n level = logging.DEBUG\n logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \\\n '%(lineno)s %(message)s ', level=level)\n logging.info(\"Logging started\")\n\n message = \"Backing up \"\n if args.source_code:\n message += \"source and \"\n message += \"data for: {0}\".format(args.app_id)\n logging.info(message)\n\n zk_connection_locations = appscale_info.get_zk_locations_string()\n zookeeper = zk.ZKTransaction(host=zk_connection_locations)\n db_info = appscale_info.get_db_info()\n table = db_info[':table']\n\n skip_list = args.skip\n if not skip_list:\n skip_list = []\n logging.info(\"Will skip the following kinds: {0}\".format(sorted(skip_list)))\n ds_backup = DatastoreBackup(args.app_id, zookeeper, table,\n source_code=args.source_code, skip_list=sorted(skip_list))\n try:\n ds_backup.run()\n finally:\n zookeeper.close()", "def main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')", "def makepacks(config, history, comm, collection, database, host, port, years):\n des_db = database if database else 'ccsdm'\n des_tbl = collection if collection else 'booking_dump'\n CleanBookingDump(history, years, comm, des_tbl, des_db, host=host, port=port).execute()\n return", "def main():\n conn = psycopg2.connect(f\"host=127.0.0.1 dbname=sparkifydb user={username} password={password}\")\n cur = conn.cursor()\n conn.set_session(autocommit=True)\n\n artists_data, songs_data = process_song_file()\n songplays_help_df, time_data, users_data = process_log_file()\n songplays_data = process_songplays_data(artists_data, songs_data, songplays_help_df)\n\n data_list = [songplays_data, users_data, songs_data, artists_data, time_data]\n for idx, (data, query) in enumerate(zip(data_list, insert_table_queries), start=1):\n print(f\"inserting file {idx}/{len(data_list)}\")\n for row in data:\n try:\n cur.execute(query, row)\n except psycopg2.Error as error:\n print(f\"Psychog2 error @ file {idx} row {row}: {error} NOTE: this file will not be inserted.\")\n\n conn.close()", "def main():\n print(\"Creating tables\")\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n connection_string = get_redshift_connection_string(config=config)\n conn = psycopg2.connect(connection_string)\n cur = conn.cursor()\n\n drop_tables(cur, conn)\n create_tables(cur, conn)\n\n conn.close()\n\n print(\"Tables have been successfully created\")" ]
[ "0.615836", "0.6075764", "0.59140235", "0.58454984", "0.58075655", "0.5771629", "0.5752413", "0.57355833", "0.5695151", "0.5693321", "0.5688143", "0.5677002", "0.5629271", "0.56139135", "0.56128824", "0.55926937", "0.55580354", "0.5531508", "0.55243224", "0.55176175", "0.55169505", "0.55017024", "0.5488147", "0.5454912", "0.5454912", "0.5454865", "0.5450833", "0.5420355", "0.5420355", "0.5420355", "0.5420355", "0.5420355", "0.54155576", "0.5410627", "0.54061335", "0.5401418", "0.54003316", "0.5398893", "0.5397886", "0.53794515", "0.53733796", "0.53733796", "0.5327167", "0.5324967", "0.5321999", "0.5297726", "0.5289183", "0.5289183", "0.52740353", "0.5264604", "0.5257321", "0.5255477", "0.5255477", "0.5255477", "0.5255477", "0.5255477", "0.5255477", "0.5255477", "0.5255477", "0.5246032", "0.52451235", "0.5237574", "0.5223577", "0.52102214", "0.5204911", "0.52001977", "0.5196808", "0.51928526", "0.5191129", "0.51909477", "0.51871884", "0.5184314", "0.5179167", "0.5169223", "0.51686144", "0.51671696", "0.5165582", "0.5158137", "0.515216", "0.5151471", "0.51484734", "0.5145634", "0.51416713", "0.5139279", "0.51392746", "0.5115186", "0.51144445", "0.5111008", "0.51084995", "0.5106807", "0.5106749", "0.5105147", "0.5099937", "0.5098518", "0.50961345", "0.5095765", "0.5094469", "0.5084735", "0.5072857", "0.5061528" ]
0.7314453
0
Which list of values does every element of the series match first?
def which_lov(series: pd.Series, patterns: Sequence[Sequence[Any]], method: Optional[Union[Callable, str]] = None, **kwargs) -> np.ndarray: elov = [(i + 1, v) for i, lov in enumerate(patterns) for v in lov] if not elov: return np.zeros(series.size, int) num, value = zip(*elov) lov_idx_plus = np.concatenate(([0], num)) if method is None: mm = series.to_numpy() == np.array(value)[:, np.newaxis] elif not callable(method): # assume name of pd.Series.str method ptns = pd.Series(value) kwargs['na'] = False do_match = getattr(series.str, method) mm = ptns.apply(do_match, **kwargs).values else: mm = method(series, value, **kwargs) return lov_idx_plus[mm.any(axis=0) + mm.argmax(axis=0)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_value_occurence_in_series(value, series):\n \n series_values_occurence = series.value_counts()\n if value in series_values_occurence:\n return series_values_occurence[value]", "def map_values_to_value_list(value_list, values):\n return [value_list.index(x) for x in values]", "def first(self, values: groupable_element_type) -> Tuple[groupable, groupable_element_type]:\n # Index of first value in each segment, in input domain\n first_idx = self.permutation[self.segments]\n return self.unique_keys, values[first_idx] # type: ignore", "def uniform_list_check(value_list):\n return reduce((lambda acc, value: acc and value == value_list[0]), value_list, True)", "def get_new_values(values):\n new_values = []\n new_value = values[0]\n for value in values:\n if value > new_value:\n new_value = value\n new_values.append(new_value)\n return new_values", "def create_mapping(series):\n uq = np.sort(series.unique())\n return uq", "def value_from_list(key, values, by_first=False):\n i, j = (1, 0,) if not by_first else (0, 1,)\n for elm in values:\n if elm[i] == key:\n return elm[j]\n return None", "def has_equal_values_vec(x):\n return jnp.all(x == x[0])", "def useSeriesAbove(requestContext, seriesList, value, search, replace):\n newSeries = []\n\n for series in seriesList:\n newname = re.sub(search, replace, series.name)\n if max(series) > value:\n n = evaluateTarget(requestContext, newname)\n if n is not None and len(n) > 0:\n newSeries.append(n[0])\n\n return newSeries", "def FirstTrue(values, default=None):\n for value in values:\n if value:\n return value\n return default", "def get_arraysurdit(self,list_values):\n\n\t\tself.list_values = list_values\n\t\tset_values = set([x for x in self.list_values if self.list_values.count(x) > 1])\n\t\treturn list(set_values)[0]", "def compare(self, values, error=1.0):\n assert type(values) == list, \"Parameter 'values' must be a list.\"\n # mismatch per value\n mismatch = [0]*len(values)\n with_idx = [set() for i in range(len(values))]\n # compare values against each other\n for i in range(len(values)):\n for j in range(i+1, len(values)):\n try:\n if not np.isclose(values[i], values[j], atol=error):\n mismatch[i] = mismatch[i] + 1\n with_idx[i].add(j)\n mismatch[j] = mismatch[j] + 1\n with_idx[j].add(i)\n except TypeError:\n # ignore values that are no numbers (e.g., None); do not\n # count it as a mismatch (just like these values are not\n # available)!\n pass\n return mismatch, with_idx", "def find_repeating_frequency(values):\n frequencies = set([0])\n\n index = 0\n frequency = 0\n while True:\n found = False\n for value in values:\n frequency += value\n index += 1\n if frequency in frequencies:\n found = True\n break\n\n frequencies.add(frequency)\n\n if found:\n break\n\n return frequency", "def value_and_tally(xs):\n res = Counter()\n for x in xs:\n res[x] += 1\n\n return res.most_common(1)[0]", "def index_two_v2(values):\n\n pairs = []\n for i in range(len(values)):\n pairs.append((values[i], i))\n pairs.sort()\n return pairs[0][1], pairs[1][1] # indices of the values are in location 1 of each pair", "def apply(self):\n next_one = super().apply()\n next_both = set()\n\n for tup in next_one:\n if (tup[1], tup[0]) in next_one:\n next_both.add(tup)\n\n return list(next_both)", "def getDirectFollowSets(self, FIRST):\n self.init_follow = {v:set() for v in self.v }\n self.containsFOLLOWOf = set()\n for v in self.v:\n if v == self.np[0][0]: # Starting Production\n self.init_follow[v] = set(['$']) # $ is in follow of 'S' applying rule 1\n for prod in 
self.g[v]:\n for i in range(len(prod)):\n if prod[i] in self.v and i+1 < len(prod):\n if prod[i+1] in self.t:\n self.init_follow[prod[i]] |= set([prod[i+1]])\n else:\n t = i + 1\n while t < len(prod) and prod[t] in self.nullables_map:\n if self.nullables_map[prod[t]] == True:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]\n break\n t += 1\n if t >= len(prod): # every thing on rhs of prod[i] could produce epsison, rule - 3\n self.containsFOLLOWOf |= set([(prod[i], v)])\n else: #prod[i+1] is a non nullable prod or prod[t] was a terminal\n if prod[t] in self.t:\n self.init_follow[prod[i]] |= set([prod[t]])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n\n elif prod[i] in self.v:\n self.containsFOLLOWOf |= set([(prod[i], v)]) # applying rule 2\n\n #self.containsFOLLOWOf = set([(a, b) for (a, b) in self.containsFOLLOWOf if a != b]) # remove the self loops\n return self.init_follow", "def find(self, value: int) -> bool:\n for x in self.freq: \n if value - x != x and value - x in self.freq: return True \n elif value - x == x and self.freq[x] > 1: return True \n return False", "def consense_values(values):\n values = filter_none(values)\n if not values:\n return None\n if len(values) == 1:\n return values[0]\n if all(isinstance(v, bool) for v in values):\n # e.g. SOMATIC\n return any(values)\n elif all(isinstance(v, list) for v in values):\n # List of numbers, e.g. PL = 11395,783,0\n return consense_lists(values)\n\n values = flatten(values)\n if all(isinstance(v, (int, float)) for v in values):\n # Numeric, e.g. DP, GMICOV, GMIMAF\n val = np.median(values)\n if all(isinstance(v, int) for v in values):\n val = int(val)\n else:\n # Default: Use the most frequently occurring (modal) value\n ctr = collections.Counter(values)\n val = ctr.most_common()[0][0]\n return val", "def possible_values(self, seq):\n for element in seq:\n if element:\n return element\n return False", "def get_series(gval, series):\n minlen = min([len(d[series]) for f, d in gval])\n return np.stack([d[series][:minlen] for f, d in gval])", "def smoothen_values(self):\n return self._smoothen_values", "def get_frequencies(valueList):\n \n valueSums = []\n values = []\n for value in valueList:\n try:\n index = values.index(value)\n valueSums[index]+=1\n except:\n values.append(value)\n valueSums.append(1)\n\n return values, valueSums", "def find_index(vec_vals,target):\n target=np.atleast_1d(target) #turn scalar into iterable, no op if already array\n vec_vals=np.array(vec_vals)\n index_list=[]\n for item in target:\n first_index=np.argmin(np.abs(vec_vals - item))\n index_list.append(first_index)\n return index_list", "def index_min(values):\n return min(values), min(range(len(values)),key=values.__getitem__)", "def matchloc(alist,val): \n return [ilc for ilc,jlc in enumerate(alist) if jlc==val]", "def findRank(e, values):\n\tcount = 1\n\tfor ve in values:\n\t\tif ve < e:\n\t\t\tcount += 1\n\treturn count", "def find_series(self, key):\n # TODO: this could be more efficient if we pushed it down into Java\n return self.filter(lambda x: x[0] == key).first()[1]", "def all_values(*values):\n print(\"here\")\n values = [_normalize(v) for v in values]\n for v in zip(*values):\n yield all(v)", "def FIRST(L):\n global fi,eps\n R=set()\n eps_appear=False\n for x in L:\n eps_appear=False\n if not x.isTerminal():\n for o in fi[x]:\n if o==eps:\n eps_appear=True\n else:\n R.add(o)\n if eps not in fi[x]:\n break\n elif x!=eps:\n R.add(x)\n break\n else: 
# x==eps\n eps_appear=True\n if eps_appear:\n R.add(eps)\n if len(R)==0:\n R.add(eps)\n return R", "def search(self, values):\n if values is False:\n return False\n if all(len(values[square]) == 1 for square in self.squares):\n return values\n n, square = min((len(values[square]), square)\n for square in self.squares if len(values[square]) > 1)\n\n return self.possible_values(self.search(self.assign(values.copy(), square, dig))\n for dig in values[square])", "def __aggregate(self, series):\n if series.name in self.__non_redundant_entity_attributes or series.name in self.__redundant_entity_attributes: # Textual entities\n merged_sensitive_terms = list()\n for sensitive_terms in series.dropna():\n merged_sensitive_terms = merged_sensitive_terms + sensitive_terms\n return merged_sensitive_terms if len(merged_sensitive_terms) > 0 else None # Return merged result, or None\n else:\n if series.nunique() > 1: # Since there are more values, pack them into a list / frozenset\n if series.name in self.__textual_attributes or series.name in self.__config.get_insensitive_attributes():\n return list(series.array)\n else:\n return frozenset(series.array)\n else:\n return series.unique()[0] # Else return just this single value", "def _assert_same(values):\n assert len(values) > 0\n first, rest = values[0], values[1:]\n for v in rest:\n assert v == first\n return first", "def test_return_series_matching_idlist_frags_strictorder(cls):\n cls.test_mmp_series_object.setup_mmp_data_for_mms(cls.temp_file_input_csv_confusion.name,\n 'SMILES', 'ID', 'PIC50',\n 3, 0.50001)\n\n cls.test_mmp_series_object.generate_store_mmp_series()\n\n result_ = cls.test_mmp_series_object.return_series_matching_idlist([24, 34, 60], 'FRAG_ID',\n strict_ordering=True)\n #print(result_.to_dict())\n cls.assertDictEqual({'SERIES_ID': {5: 2, 6: 2, 7: 2, 8: 2, 9: 2, 23: 6, 24: 6, 25: 6, 26: 6, 27: 6},\n 'SERIES_SEQ_ID': {5: 1, 6: 2, 7: 3, 8: 4, 9: 5, 23: 1, 24: 1, 25: 2, 26: 3, 27: 4},\n 'CONTEXT_ID': {5: 29, 6: 29, 7: 29, 8: 29, 9: 29, 23: 208, 24: 208, 25: 208, 26: 208,\n 27: 208},\n 'FRAG_ID': {5: 24, 6: 34, 7: 60, 8: 82, 9: 286, 23: 24, 24: 9, 25: 34, 26: 60, 27: 82},\n 'MOL_ID': {5: 1, 6: 2, 7: 3, 8: 4, 9: 5, 23: 30002100, 24: 300021, 25: 298356,\n 26: 29835601, 27: 29835602},\n 'ACTIVITY': {5: 7.0, 6: 7.68, 7: 8.51, 8: 8.77, 9: 8.99, 23: 6.5, 24: 6.5, 25: 6.9,\n 26: 7.3, 27: 7.91}},\n result_.to_dict())", "def match(\n x: Any,\n table: Iterable,\n nomatch: Any = -1,\n # incomparables ...,\n base0_: bool = None,\n) -> Iterable[int]:\n base = int(not get_option(\"which_base_0\", base0_))\n return Array(\n [\n list(table).index(elem) + base if elem in table else nomatch\n for elem in x\n ],\n dtype=int,\n )", "def findall(lo,val):\n\tu = []\n\ti = -1\n\twhile( i < len(lo)-1):\n\t\ttry:\n\t\t\ti = lo.index(val,i+1)\n\t\t\tu.append(i)\n\t\texcept:\n\t\t\ti += 1\n\treturn u", "def _starts(self, column_labels):\n val = [self[c][0] for c in column_labels]\n starts = [0]\n values = [val]\n for i in range(1,self.num_rows):\n ival = [self[c][i] for c in column_labels ]\n if ival != val:\n starts.append(i)\n values.append(ival)\n val = ival\n return values, starts", "def find_remove_duplicates(list_of_values):\r\n output = []\r\n seen = set()\r\n for value in list_of_values:\r\n if value not in seen:\r\n output.append(value)\r\n seen.add(value)\r\n return output", "def cross_timeseries(series1, series2):\n\n ts_new1 = []\n val_new1 = []\n\n ts_new2 = []\n val_new2 = []\n\n for i in range(len(series1[1])):\n # for j in range(len(series2[1])):\n if 
series1[1][i] in series2[1]:\n ts_new1.append(series1[1][i])\n val_new1.append(series1[0][i])\n ts_new2.append(series2[1][series2[1].index(series1[1][i])])\n val_new2.append(series2[0][series2[1].index(series1[1][i])])\n\n return [val_new1, ts_new1], [val_new2, ts_new2]", "def findRanks(toBeRanked, values):\n\treturn list(map(lambda e: findRank(e, values), toBeRanked))", "def fn(nums):\n ans, vals = [], []\n for i, x in enumerate(nums): \n k = bisect_left(vals, x)\n if k == len(vals): vals.append(x)\n else: vals[k] = x\n ans.append(k)\n return ans", "def list_unique_values(series: pd.Series) -> str:\n return \", \".join(set(str(v) for v in pd.Series.unique(series)))", "def indicator_func(*args):\n for value_set in args:\n if value_set[0] != value_set[1]:\n return 0\n return 1", "def any(self, values):\n return self.aggregate(values, \"any\")", "def find_pairs_simple(candidate_array, TARGET_VALUE=10):\r\n for i in range(len(candidate_array)):\r\n for j in range(i + 1, len(candidate_array)):\r\n if (TARGET_VALUE == candidate_array[i] + candidate_array[j]):\r\n #print \"%d,%d\" % (candidate_array[i], candidate_array[j])\r\n None", "def find_min(values: Sequence[Optional[float]]) -> \\\n Tuple[Optional[int], float]:\n min_value: float = np.inf\n min_index: Optional[int] = None\n for index_, value_ in enumerate(values):\n if value_ is not None and value_ < min_value:\n min_value = value_\n min_index = index_\n return min_index, min_value", "def values(self):\n values = sorted(self._values.items(), key=lambda x: x[1])\n return [hospital[0] for hospital in values]", "def find_value(lists, target):\n loc = []\n l = len(lists)\n for i in range(0, l, 1):\n if(lists[i] == target):\n loc.append(i)\n else:\n continue\n return loc", "def separate_by_value(yvals):\n bins = {}\n for i, val in enumerate(yvals):\n if(val not in bins):\n bins[val] = []\n bins[val].append(i)\n \n return bins", "def values():", "def toset(series: pd.Series) -> Set:\n\n return set(series.tolist())", "def list_of_genres_no_political(pd_series):\n\n genres = []\n for genre_list in pd_series:\n for genre in genre_list:\n if genre not in genres:\n genres.append(genre)\n\n return genres", "def findClosed(freqSet, freqSup):", "def any_values(*values):\n values = [_normalize(v) for v in values]\n for v in zip(*values):\n yield any(v)", "def compare(self, values):\n raise NotImplementedError", "def _not_matching(values, sieve):\n return [val for val in values if val not in sieve]", "def fisher_p_value(contingency_table: np.ndarray) -> List[float]:\n _, fisher_p_value = stats.fisher_exact(contingency_table, alternative=\"greater\")\n return [fisher_p_value]", "def find(self, value):\n if value in self.s:\n return True\n\n for k in self.l:\n if value - k in self.l:\n if value - k == k and self.l[k] == 1:\n continue\n self.s[value] = value - k\n return True\n return False", "def freqvals(t):\n N = len(t)\n T = t[-1] - t[0]\n dt = T/N\n nyquist = 1/(2.0*dt)\n lowfreq = 1/T\n return (N, T, dt, nyquist, lowfreq)", "def find_index_of_closets_value(values, value_to_look_for):\n closest_index = 0\n # Init to some value\n closest_distance = max(abs(value_to_look_for), abs(values[0]))\n for index, value in enumerate(values):\n distance = abs(value - value_to_look_for)\n if distance < closest_distance:\n closest_index = index\n closest_distance = distance\n return closest_index", "def naive(p, t):\n\toccurence = []\n\tfor i in range(len(t)-len(p) + 1):\n\t\tmatch = True\n\t\tfor j in range(len(p)):\n\t\t\tif not p[j] == t[i+j]:\n\t\t\t\tmatch = 
False\n\t\t\t\tbreak\n\t\tif match:\n\t\t\toccurence.append(i)\n\treturn occurence", "def valfilter(ls,minmax):\n # Find how often each values occur in ls\n count = Counter(ls)\n # Remove keys that occur only once\n keys = count.keys()\n for key in keys:\n if count[key] == 1:\n del count[key]\n keys = count.keys()\n # Return min or max as specified\n if minmax == 'min':\n return min(keys)\n if minmax == 'max':\n return max(keys)", "def all_ver(seq):\r\n for item in seq:\r\n if np.array_equal(item, Quantifier.AnotB):\r\n return Quantifier.F\r\n return Quantifier.T", "def asin_array(values): # pragma: no cover\n result = np.empty_like(values, dtype=nb.float64)\n flat_result = result.flat\n flat_values = values.flat\n for i in range(values.size):\n flat_result[i] = asin(flat_values[i])\n return result", "def first2(x, y):\n y = np.asarray(y)\n return y[np.argsort(x)][0]", "def itervaluerefs(self):\r\n return self.data.itervalues()", "def condition(ls):\n return reduce(lambda x,y: False if x is False or x[0]==y[0] else y, ls) != False", "def best_match(sv: SiteVec, sgs: List[SiteGroup]) -> Tuple[SiteGroup, float]:\n best_match = None\n best_similarity = -np.inf\n for sg in sgs:\n if sv.species != sg.species:\n continue\n csim = cosine_similarity(sv.vec, sg.vec)\n if csim > best_similarity:\n best_similarity = csim\n best_match = sg\n if best_match is None:\n raise ValueError(\"No matching species found.\")\n return best_match, best_similarity", "def used_xvals(self):\n return [x for x in self.xvals() if any([len(self.get_plaquette(x, y)) > 0\n for y in self.yvals()])]", "def subdata(min_,dict_):\n list_ = []\n return [value for value,freq in dict_.items() if freq > min_]", "def first(x):\n try:\n x = x.to_series()\n except AttributeError:\n pass\n return list(x)[0]", "def find_pairs(candidate_array, TARGET_VALUE=10):\r\n \r\n from collections import defaultdict\r\n positions = defaultdict(list)\r\n \r\n #Read everything into a dictionary, storing the original array position \r\n for i in range(len(candidate_array)):\r\n positions[candidate_array[i]].append(i)\r\n\r\n #Read list comparing value to TARGET_VALUE \r\n for i in range(len(candidate_array)):\r\n pair_value = TARGET_VALUE - candidate_array[i]\r\n if positions[pair_value]:\r\n for p in positions[pair_value]:\r\n if p > i:\r\n #print \"%d,%d\" % (candidate_array[i], pair_value)\r\n None", "def values(self):\n return [i.value for i in self.value]", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def splite_dataset(data_set, axis, value):\n result = []\n for feat_vector in data_set:\n if feat_vector[axis] == value:\n reduced_feat_vector = feat_vector[:axis]\n reduced_feat_vector.extend(feat_vector[axis+1:])\n result.append(reduced_feat_vector)\n\n return result", "def get_values(self, value):\n return [\n obj for obj in self if obj == value\n ]", "def index_(iterable: Iterable[_Value], x: _Value) -> int:\n for i, value in enumerate(iterable):\n if x == value:\n return i\n elif isinstance(value, float) and isinstance(x, float):\n if abs(x - value) < FLOAT_EQUALITY_EPSILON:\n return i\n raise ValueError(\"{} is not in iterable\".format(str(x)))", "def find_bscs(ckt, a):\n return reduce(lambda x, y: x | y, [ckt[x].fins for x in a]).difference(set(a))", "def find(self, value):\n checks = set()\n n = len(self.arr)\n\n for i in range(n): # 0...n-1\n el = self.arr[i]\n if value - el in checks:\n 
# hurray, found a pair\n return True\n checks.add(el)\n return False", "def effectiveness_of_a_pair(list_of_historic_matches):\n was_late_list = []\n was_ht_late_list = []\n was_ft_late_list = []\n late_matches = 0\n ht_late_matches = 0\n ft_late_matches = 0\n if len(list_of_historic_matches) >= 5:\n for match in list_of_historic_matches:\n all_late = check_if_late_goal(match.match_goals_minutes)\n was_late_list.append(all_late[0])\n was_ht_late_list.append(all_late[1])\n was_ft_late_list.append(all_late[2])\n total_matches = len(list_of_historic_matches)\n for j in range(len(was_late_list)):\n if was_late_list[j]:\n late_matches += 1\n effectiveness = f\"{late_matches}/{total_matches}\"\n for j in range(len(was_ht_late_list)):\n if was_ht_late_list[j]:\n ht_late_matches += 1\n ht_effectiveness = f\"{ht_late_matches}/{late_matches}\"\n for j in range(len(was_ft_late_list)):\n if was_ft_late_list[j]:\n ft_late_matches += 1\n ft_effectiveness = f\"{ft_late_matches}/{late_matches}\"\n else:\n return [\"0/0\", \"0/0\", \"0/0\"]\n return [effectiveness, ht_effectiveness, ft_effectiveness]", "def _condense(a,b=None):\r\n\t\t\r\n\t\t# second is by default empty\r\n\t\tif b is None:\r\n\t\t\tb = []\r\n\t\t\r\n\t\t# add first into second\r\n\t\tfor i in a:\r\n\t\t\t\r\n\t\t\t# try to add onto all terms\r\n\t\t\tt = [i.add(j) for j in b]\r\n\t\t\t\r\n\t\t\t# check for match\r\n\t\t\tm = False\r\n\t\t\tfor n,j in enumerate(t):\r\n\t\t\t\tif j is not None:\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t# replace with combination\r\n\t\t\t\t\tb[n] = j\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t# stop searching\r\n\t\t\t\t\tm = True\r\n\t\t\t\t\tbreak\r\n\t\t\t\t\r\n\t\t\t# otherwise append\r\n\t\t\tif not m:\r\n\t\t\t\tb.append(i)\r\n\t\t\t\r\n\t\t\t# remove zeroes\r\n\t\t\tzo = lambda x: 0 in x\r\n\t\t\tb = [i for i in b if not zo(i)]\r\n\t\t\t\r\n\t\treturn b", "def find_singles(self, rhes):\n digit_count = self.count_dict(rhes)\n found = [num for num, val in digit_count.items() if val == 1]\n return [[number, pos] for pos, lst in enumerate(rhes) for number in found if number in lst]", "def values_like(self, value=0):\n values = _ensure_len(len(self), value, strict=True)\n try:\n return plist([x.values_like(v) for x, v in zip(self, values)], root=self.__root__)\n except Exception:\n pass\n return plist([v for v in values], root=self.__root__)", "def reducer(self, key, values):\n best_score = 0.0\n merged_tuple_indices = (0, 0) \n \n for score, index1, index2 in values:\n if score > best_score:\n best_score = score\n merged_tuple_indices = (index1, index2)\n result = (merged_tuple_indices, best_score)\n yield 1, result", "def values(self):\n for ts in self:\n yield self[ts]", "def check_series(text_list, set_list):\n in_list = []\n for word in text_list:\n all_words = re.sub('\\(.*?\\)', ',', word).split(',')\n all_words = list(filter(None, all_words))\n component_in_list = [component.strip(' ') in set_list for component in all_words]\n this_word_in_list = all(component_in_list)\n in_list.append(this_word_in_list)\n return in_list", "def cluster_eignvalues(vals):\n\n pos, neg, zer = [], [], []\n for val in vals:\n if is_close(val, 0.0):\n zer.append(val)\n elif val > 0:\n pos.append(val)\n elif val < 0:\n neg.append(val)\n else:\n raise Exception(\"The world is a weird place.\")\n\n return pos, neg, zer", "def most_frequent(s):\n\n hist = make_histogram(s)\n\n t = []\n for x, freq in hist.items():\n t.append((freq, x))\n \n t.sort(reverse = True)\n # print(t)\n res = []\n for freq, x in t:\n res.append(x)\n \n return res", "def 
list_value(self) -> global___Expression.RepeatedValue:", "def osd(counts):\n return (counts!=0).sum(), (counts==1).sum(), (counts==2).sum()", "def get_index(ks):\n unq_vals, unq_ix = np.unique(ks[:, 0], return_index=True)\n return np.vstack([unq_vals, unq_ix]).T", "def aitemfreq(a):\r\n scores = pstats.aunique(a)\r\n scores = N.sort(scores)\r\n freq = N.zeros(len(scores))\r\n for i in range(len(scores)):\r\n freq[i] = N.add.reduce(N.equal(a,scores[i]))\r\n return N.array(pstats.aabut(scores, freq))", "def compare_series(series_a, series_b):\n return {\n 'rmse': ((series_a - series_b) ** 2).mean() ** 0.5,\n 'mbe': (series_b - series_a).mean(),\n 'mae': abs(series_b - series_a).mean(),\n 'rsqr': stats.linregress(series_a, series_b).rvalue ** 2,\n }", "def first_and_last(values):\n first = values[0]\n last = values[-1]\n return [first, last]", "def coincident_maxima(a_list, b_list):\n a_maxima = local_maxima(a_list)\n b_maxima = local_maxima(b_list)\n\n return [x for x in a_maxima if x in b_maxima]", "def values(self, ps):\n ps = np.asarray(ps)\n if np.any(ps < 0) or np.any(ps > 1):\n raise ValueError('Probability p must be in range [0, 1]')\n\n index = np.searchsorted(self.ps, ps, side='left')\n return self.xs[index]", "def all_label_values(self, label_list_ids=None):\n values = set()\n\n for label_list in self.label_lists.values():\n if label_list_ids is None or label_list.idx in label_list_ids:\n values = values.union(label_list.label_values())\n\n return values", "def values(self):\n # TODO: Collect all values in each of the buckets\n all_values = [] # Will store all the key\n\n for bucket in self.buckets:\n for value in bucket:\n if value is not None:\n all_values.append(value[1])\n return all_values", "def strip_head(sequence, values):\n values = set(values)\n return list(itertools.dropwhile(lambda x: x in values, sequence))" ]
[ "0.6192266", "0.5891128", "0.5607933", "0.5490202", "0.53656596", "0.5315335", "0.53010494", "0.5252702", "0.52464557", "0.5246342", "0.522789", "0.51933503", "0.51531285", "0.51512164", "0.513468", "0.5120946", "0.5081794", "0.5081263", "0.50539905", "0.50522214", "0.5044985", "0.5000855", "0.49890947", "0.49885365", "0.49626026", "0.49572223", "0.49377844", "0.4914598", "0.49081996", "0.4895925", "0.48950222", "0.4894923", "0.48622802", "0.4856576", "0.48446617", "0.48443198", "0.48399612", "0.4825797", "0.4823169", "0.48206997", "0.48148862", "0.48115095", "0.4807194", "0.48068053", "0.47694033", "0.47648376", "0.4763964", "0.47634298", "0.4746094", "0.47393385", "0.47294822", "0.47236794", "0.47232458", "0.47196957", "0.47136936", "0.47031805", "0.4698537", "0.4692677", "0.46866217", "0.46800143", "0.46776086", "0.4655803", "0.4651248", "0.46353117", "0.46322688", "0.46303907", "0.4629984", "0.46289495", "0.46247855", "0.46245316", "0.4619973", "0.460744", "0.46065885", "0.46056688", "0.46056688", "0.45988673", "0.4597237", "0.45966575", "0.45840934", "0.4580448", "0.4579583", "0.45740664", "0.45722133", "0.4556717", "0.4555539", "0.45530674", "0.45529014", "0.4550079", "0.45459482", "0.45452565", "0.4544789", "0.45445886", "0.45440698", "0.45426315", "0.45424464", "0.45397797", "0.4532249", "0.4531685", "0.45295238", "0.45268002" ]
0.48023164
44
Returns the tag of the first matched list of values. For each element in ``series``, the result is the tag of the list of values in the dictionary of LoVs ``taglov`` that first matches the element with one of its values, OR the value from ``donor`` at the same index, OR ``na``.
def which_tag(series: pd.Series, taglov: Union[TagLoV, Any], na: Any, donor: pd.Series = None, method: Optional[Union[Callable, str]] = None, **kwargs): if series.empty: return series if not isinstance(taglov, TagLoV): taglov = TagLoV(taglov) lov_idx_plus = which_lov(series, taglov.lovs, method, **kwargs) tags_plus = np.array((na, *taglov.tags)) result = pd.Series(tags_plus[lov_idx_plus], index=series.index) if isinstance(donor, pd.Series): # take unmatched values from donor unmatched_idx = series.index[~lov_idx_plus.astype(bool)] if not unmatched_idx.empty: take_idx = unmatched_idx.intersection(donor.index) if not take_idx.empty: result[take_idx] = donor[take_idx] return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tag_one(self, tokens, index, history):\n tag = None\n for tagger in self._taggers:\n tag = tagger.choose_tag(tokens, index, history)\n if tag is not None:\n break\n return tag", "def find_usefull_tags(tags, tagmodel, tag_count_vect):\n\n final_tags = []\n for tag in tags:\n if tag == None:\n continue\n else:\n tagpd = pd.Series(tag)\n tag_feature = tag_count_vect.transform(tagpd)\n result = tagmodel.predict(tag_feature)\n\n result = result.tolist() \n result = str(result)\n if result == '[1]':\n final_tags.append(tag)\n final_tags = list(dict.fromkeys(final_tags))\n return(final_tags)", "def find_first_tag(self, tag):\n for lm, _ in self.search(tag=tag):\n return lm", "def __aggregate(self, series):\n if series.name in self.__non_redundant_entity_attributes or series.name in self.__redundant_entity_attributes: # Textual entities\n merged_sensitive_terms = list()\n for sensitive_terms in series.dropna():\n merged_sensitive_terms = merged_sensitive_terms + sensitive_terms\n return merged_sensitive_terms if len(merged_sensitive_terms) > 0 else None # Return merged result, or None\n else:\n if series.nunique() > 1: # Since there are more values, pack them into a list / frozenset\n if series.name in self.__textual_attributes or series.name in self.__config.get_insensitive_attributes():\n return list(series.array)\n else:\n return frozenset(series.array)\n else:\n return series.unique()[0] # Else return just this single value", "def useSeriesAbove(requestContext, seriesList, value, search, replace):\n newSeries = []\n\n for series in seriesList:\n newname = re.sub(search, replace, series.name)\n if max(series) > value:\n n = evaluateTarget(requestContext, newname)\n if n is not None and len(n) > 0:\n newSeries.append(n[0])\n\n return newSeries", "def which_lov(series: pd.Series,\n patterns: Sequence[Sequence[Any]],\n method: Optional[Union[Callable, str]] = None,\n **kwargs) -> np.ndarray:\n elov = [(i + 1, v) for i, lov in enumerate(patterns) for v in lov]\n if not elov:\n return np.zeros(series.size, int)\n num, value = zip(*elov)\n lov_idx_plus = np.concatenate(([0], num))\n if method is None:\n mm = series.to_numpy() == np.array(value)[:, np.newaxis]\n elif not callable(method): # assume name of pd.Series.str method\n ptns = pd.Series(value)\n kwargs['na'] = False\n do_match = getattr(series.str, method)\n mm = ptns.apply(do_match, **kwargs).values\n else:\n mm = method(series, value, **kwargs)\n return lov_idx_plus[mm.any(axis=0) + mm.argmax(axis=0)]", "def tag(self, tokens):\n if overridden(self.tag_sents):\n return self.tag_sents([tokens])[0]", "def get_tag_value(\n service: str,\n tags: List[Any],\n tag_key: str,\n) -> str:\n capitalize = capitalize_tag_kv(service)\n matches = [\n t[f\"{'V' if capitalize else 'v'}alue\"]\n for t in tags\n if t[f\"{'K' if capitalize else 'k'}ey\"] == tag_key\n ]\n if len(matches) != 1:\n log_error(\n f\"Oops it looks like we're unable to find a match for tag {tag_key}.\"\n \"Please open an issue to help us get this fixed!\",\n )\n raise Abort()\n\n return matches[0]", "def _get_tagged_value(self, key):\n return self._tagged_values_dict[key]", "def return_tag_tokens(self, tags_indexes, observations):\n tag_pred = []\n for tag_index in tags_indexes:\n tag_pred.append(observations.T.index[tag_index])\n return tag_pred", "def get_tag_options(label_matches):\r\n\ttag_options = []\r\n\tfor key in label_matches.keys():\r\n\t\tif key[1] not in tag_options:\r\n\t\t\ttag_options.append(key[1])\r\n\treturn tag_options", "def best_sequence(self, T, pos, psi, 
phi, fix_tags=[]):\n for idx, m in fix_tags:\n phi[idx - 1, m] = 100\n # if fix_idx:\n # phi[fix_idx - 1, fix_m] = 100\n msgs, pointers = max_product(T, pos, psi, phi, True)\n tags_dict = get_best_tags(T, msgs, pointers)\n tags = []\n for i in range(1, len(T) + 1):\n tags.append(self.get_tag(tags_dict[str(i)]))\n return tags", "def predict(self, tokens: TokenSeq) -> PosSeq:\n _, pos_tags = self.predict_greedy(tokens)\n # _, _, pos_tags = self.predict_viterbi(tokens)\n return pos_tags", "def find_series(self, key):\n # TODO: this could be more efficient if we pushed it down into Java\n return self.filter(lambda x: x[0] == key).first()[1]", "def _series_handler(self, values, style, caller, *args):\n\n behaviors = {\"over\": values.ge,\n \"under\": values.lt}\n\n evaluated = values[behaviors.get(caller)(self.margin)]\n\n if style == \"values\":\n return evaluated\n else:\n return list(evaluated.index)", "def get_default_tag(self, tags):\n tags_counter = Counter()\n for tag in tags:\n tags_counter[tag] += 1\n\n if len(tags_counter) == 2 and list(tags_counter.values())[0] == list(tags_counter.values())[1]:\n return ut.find_positive_tag(tags_counter.keys())\n\n return tags_counter.most_common(1)[0][0]", "def get_latest_tag(self, repo: git.Repo) -> Tuple[Optional[\n git.refs.tag.TagReference], Optional[semantic_version.Version]]:\n raw_tag = self._search_strategy(\n repo=repo, branch=self._branch)\n if raw_tag is None:\n return None, None\n sem_tag = semantic_version.Version(\n tag_search_strategy.clean_tag_name(str(raw_tag)))\n return raw_tag, sem_tag", "def get_tag(self, xaf, name, not_found_value=None,\n counter_str_value='latest', force_step_name=None,\n force_plugin_name=None):\n tag_name = self.__get_tag_name(name, counter_str_value,\n force_step_name, force_plugin_name)\n return xaf.tags.get(tag_name, not_found_value)", "def _tag_tokens(self, targets: list, tokens: list, tags: dict=BIO_TAGS, bert_tokenizer=None, verbose: bool=False):\n if bert_tokenizer is not None:\n tokenizer = bert_tokenizer\n\n if len(targets) > 0:\n tags_list = []\n for tgt in targets:\n t_list = []\n inside = False\n found = False\n if bert_tokenizer is not None:\n tgt_terms = tokenizer.tokenize(tgt[1]) \n else:\n tgt_terms = self._tokenize_line(tgt[1])\n\n if verbose:\n print(tgt_terms)\n\n for i in range(len(tokens)):\n if tokens[i] == tgt_terms[0] and not found: \n # token is the beginning (B) of target terms sequence\n t_list.append(tags[\"B\"])\n if len(tgt_terms) > 1 and tokens[i:i+len(tgt_terms)] == tgt_terms:\n # check if the matching token is not a repetition of the term\n # and is the actual target term, if so the correct sequence is found \n inside = True\n found = True\n\n elif inside == True:\n # multi words terms\n if tokens[i] in tgt_terms[1:-1] and len(tgt_terms) > 2:\n # token is inside (I) the target terms sequence\n t_list.append(tags[\"I\"])\n\n elif tokens[i] == tgt_terms[-1]:\n # token is the last (L) target term\n t_list.append(tags[\"I\"]) # tags[\"L\"] \n inside = False\n\n # when the last tgt_word is repeated inside the tgt_terms \n inside = False\n\n else:\n # token is outside (O) the target terms sequence\n t_list.append(tags[\"O\"])\n\n tags_list.append(torch.Tensor(t_list))\n\n # merge tags\n tags_tensor = torch.stack(tags_list)\n res = torch.min(tags_tensor, dim=0)\n if verbose:\n print(\"targets:\", targets)\n print(\"tokens:\", tokens, \"-- len:\", len(tokens))\n print(\"tags:\", tags_list)\n #print(\"tags:\", tags_tensor.size())\n #print(\"res:\", res.values.size())\n \n return 
res.values\n\n else:\n return [tags[\"O\"] for t in tokens]", "def best_sequence_brute(self, T, pos, psi, phi):\n ms = self.get_all_tag_seq(len(T))\n log_scores = tr.zeros(len(ms), dtype=tr.float64)\n for i in range(len(ms)):\n log_scores[i] = self.log_score(T, pos, ms[i], psi, phi)\n best = ms[tr.argmax(log_scores)]\n tags = []\n for i in range(len(T)):\n tags.append(self.get_tag_index(best[i]))\n return tags", "def get_tag_values(self, event):\n raise NotImplementedError", "def _analyze_values(self,i,j,tags):\n ti = tags[i]\n tj = tags[j]\n si = get_simplified_pos(ti)\n sj = get_simplified_pos(tj)\n\n dt = abs(i-j)-1\n if dt >= 5: dt = 5\n if dt >= 10: dt = 10\n if dt >= 15: dt = 15\n dt = str(dt)\n\n if i == 0: \n tpi,spi = '-S-','-S-'\n else:\n tpi = tags[i-1]\n spi = get_simplified_pos(tpi)\n\n if j == len(tags)-1: \n tnj,snj = '-E-','-E-'\n else:\n tnj = tags[j+1]\n snj = get_simplified_pos(tnj)\n\n if i != j-1:\n tni = tags[i+1]\n sni = get_simplified_pos(tni)\n tpj = tags[j-1]\n spj = get_simplified_pos(tpj)\n else:\n tni,sni = '-M-','-M-'\n tpj,spj = '-M-','-M-'\n\n return si,sj,dt,tpi,tni,tpj,tnj,spi,sni,spj,snj", "def get_or_create_specific_tag_without_bilateral(**kwargs):\n ix = kwargs.pop('ix')\n channel = kwargs.pop('channel')\n tag_number = kwargs.pop('tag_number')\n\n if ix.tags_policy == 'ix_managed':\n used_tags = Tag.objects.filter(\n ix=ix).exclude(status='AVAILABLE').order_by('tag')\n\n else:\n pe_channel = get_pe_channel_by_channel(channel=channel, ix=ix)\n tag_domain = pe_channel.channel_port if pe_channel else None\n used_tags = Tag.objects.filter(\n ix=ix,\n tag_domain=tag_domain).exclude(\n status='AVAILABLE').order_by('tag')\n\n if used_tags.filter(tag=tag_number):\n return 0\n else:\n free_tags = get_tag_without_bilateral(\n ix=ix, channel=channel)\n if free_tags.filter(tag=tag_number):\n return free_tags.get(tag=tag_number)\n else:\n tag = instantiate_tag(\n channel=channel, ix=ix, tag_number=tag_number)\n return tag", "def get(self, label_sn):\n tags = self.list()\n return [\n tag\n for tag\n in tags\n if str(label_sn) in tag.get('args', {}).values()\n ]", "def tag(self, tokens):\n (yyhat, _) = self.tag_with_features(tokens)\n return yyhat", "def choose_ltv(self, label):\n tids = self.node_tids[label]\n vals = self.node_vals[label]\n losses = [self.tid_losses_dct[tid] for tid in tids]\n\n # -- try to return the value corresponding to one of the\n # trials that was previously chosen\n tid_set = set(tids)\n for tid in self.best_tids:\n if tid in tid_set:\n idx = tids.index(tid)\n rval = losses[idx], tid, vals[idx]\n break\n else:\n # -- choose a new best idx\n ltvs = sorted(zip(losses, tids, vals))\n best_idx = int(self.rng.geometric(1.0 / self.avg_best_idx)) - 1\n best_idx = min(best_idx, len(ltvs) - 1)\n assert best_idx >= 0\n best_loss, best_tid, best_val = ltvs[best_idx]\n self.best_tids.append(best_tid)\n rval = best_loss, best_tid, best_val\n return rval", "def _get_head_tags(\n self,\n head_tag: torch.Tensor,\n child_tag: torch.Tensor,\n head_indices: torch.Tensor,\n ) -> torch.Tensor:\n batch_size = head_tag.size(0)\n # shape (batch_size, 1)\n range_vector = torch.arange(batch_size, device=head_tag.device).unsqueeze(1)\n\n # This next statement is quite a complex piece of indexing, which you really need\n # to read the docs to understand. 
See here:\n # https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html#advanced-indexing\n # In effect, we are selecting the indices corresponding to the heads of each word\n # from the sequence length dimension for each element in the batch.\n\n # shape (batch_size, sequence_length, tag_dim)\n selected_head_tag = head_tag[range_vector, head_indices]\n selected_head_tag = selected_head_tag.contiguous()\n # shape (batch_size, sequence_length, num_head_tags)\n head_tag_logits = self.tag_bilinear(selected_head_tag, child_tag)\n return head_tag_logits", "def FirstTrue(values, default=None):\n for value in values:\n if value:\n return value\n return default", "def tag(self, tokens):\n numRows = len(self.tags)\n numCols = len(tokens)\n # initialize tables for dynamic programming\n table = array([[0] * numCols] * numRows, dtype=float32)\n trace = array([[None] * numCols] * numRows)\n \n # fill in the base cases, i.e. the first column\n for row in range(numRows):\n currentTag = self.tags[row]\n currentWord = tokens[0] if tokens[0] in self.vocab else '<OOV>'\n table[row][0] = self.tag_tag_probs['<START>'].prob(currentTag) * self.tag_word_probs[currentTag].prob(currentWord)\n trace[row][0] = '<START>'\n \n # fill the rest of the table\n # iterate through by columns\n for col in range(1, numCols):\n for row in range(numRows):\n currentTag = self.tags[row]\n currentWord = tokens[col] if tokens[col] in self.vocab else '<OOV>'\n maxProbability = 0.0;\n maxPrevRow = 0\n \n # iterate through the previous column and find the maximum probability\n # as well as the previous tag that led to the maximum probability\n for prevRow in range(numRows):\n prevTag = self.tags[prevRow]\n probability = table[prevRow][col-1] * self.tag_tag_probs[prevTag].prob(currentTag) * self.tag_word_probs[currentTag].prob(currentWord)\n if probability > maxProbability:\n maxProbability = probability\n maxPrevRow = prevRow\n \n table[row][col] = maxProbability\n trace[row][col] = maxPrevRow\n \n returnList = []\n # retrace and construct the tag list\n maxIndex = argmax(table, axis=0)[-1]\n # insert the last (token, tag) pair\n returnList.insert(0, (tokens[-1], self.tags[maxIndex]))\n # loop through the trace table and prepend each (token, tag) pair\n i = numCols - 1\n index = trace[maxIndex][numCols-1]\n while i > 0:\n returnList.insert(0, (tokens[i-1], self.tags[index]))\n i -= 1\n index = trace[index][i]\n \n return returnList", "def values(self):\n store = getMainStore()\n tagIDs = self._getTagIDs()\n where = self._getWhereClause(tagIDs)\n return store.find((Tag, TagValue), *where)", "def pos_taggers(self, df):\n p2 = []\n post_process = df['Keyword'].tolist() \n p1 = nltk.pos_tag(post_process)\n for i in post_process:\n p2.append(nltk.pos_tag([i]))\n return p1,p2", "def _get_tagged_doc(self, doi):\n\n return self.tagged_docs[list(map(lambda x: x.tags[0], self.tagged_docs)).index(doi)]", "def get_tag_names(**kwargs):\n\n if kwargs:\n if 'key' and 'values' in kwargs:\n names = Tags.get_filtered_names_in(kwargs['key'], kwargs['values'])\n\n else:\n names = Tags.get_filtered_names(**kwargs)\n\n else:\n names = Tags.get_names()\n\n return names", "def first(self, values: groupable_element_type) -> Tuple[groupable, groupable_element_type]:\n # Index of first value in each segment, in input domain\n first_idx = self.permutation[self.segments]\n return self.unique_keys, values[first_idx] # type: ignore", "def get_or_create_specific_tag_without_all_service(**kwargs):\n ix = kwargs.pop('ix')\n channel = 
kwargs.pop('channel')\n tag_number = kwargs.pop('tag_number')\n\n if ix.tags_policy == 'ix_managed':\n used_tags = Tag.objects.filter(\n ix=ix).exclude(status='AVAILABLE').order_by('tag')\n\n else:\n pe_channel = get_pe_channel_by_channel(channel=channel, ix=ix)\n tag_domain = pe_channel.channel_port if pe_channel else None\n used_tags = Tag.objects.filter(\n ix=ix,\n tag_domain=tag_domain).exclude(\n status='AVAILABLE').order_by('tag')\n\n if used_tags.filter(tag=tag_number):\n return 0\n else:\n free_tags = get_tag_without_all_service(\n ix=ix, channel=channel)\n if free_tags.filter(tag=tag_number):\n return free_tags.get(tag=tag_number)\n else:\n tag = instantiate_tag(\n channel=channel, ix=ix, tag_number=tag_number)\n return tag", "def find_min(values: Sequence[Optional[float]]) -> \\\n Tuple[Optional[int], float]:\n min_value: float = np.inf\n min_index: Optional[int] = None\n for index_, value_ in enumerate(values):\n if value_ is not None and value_ < min_value:\n min_value = value_\n min_index = index_\n return min_index, min_value", "def get_tag(tag_name, tag_list):\n for i in range(len(tag_list)):\n if tag_name == str(tag_list[i]):\n return tag_list[i]", "def value_from_list(key, values, by_first=False):\n i, j = (1, 0,) if not by_first else (0, 1,)\n for elm in values:\n if elm[i] == key:\n return elm[j]\n return None", "def get_tag(self, tag_name):\n tag_values = []\n for tag in self.tags:\n if tag.tag_name == tag_name:\n tag_values.append(tag.tag_value)\n\n if len(tag_values) == 1:\n if tag_values[0].lower() == 'true':\n return True\n elif tag_values[0].lower() == 'false':\n return False\n else:\n return tag_values[0]\n elif len(tag_values) > 1:\n return tag_values\n else:\n return False", "def get_tag(self, tag_name):\n tag_values = []\n for tag in self.tags:\n if tag.tag_name == tag_name:\n tag_values.append(tag.tag_value)\n\n if len(tag_values) == 1:\n if tag_values[0].lower() == 'true':\n return True\n elif tag_values[0].lower() == 'false':\n return False\n else:\n return tag_values[0]\n elif len(tag_values) > 1:\n return tag_values\n else:\n return False", "def get_series(gval, series):\n minlen = min([len(d[series]) for f, d in gval])\n return np.stack([d[series][:minlen] for f, d in gval])", "def get_creator_from_head_node_tag(pd_series):\n\n # Check head node exists Head node\n if not pd.isna(pd_series[\"Head Node\"]):\n creator = get_creator_tag(pd_series[\"Head Node\"])\n else:\n # Return NA if no head node\n return np.nan\n\n # Check creator also isn't none\n if creator is not None:\n return creator\n\n return np.nan", "def _get_labels(touches):\n \n out = touches.copy(deep=True)\n # pandas df.min() ignores NaN values\n first_touch = touches[['stop_loss', 'take_profit']].min(axis=1)\n for loc, t in first_touch.items():\n if pd.isnull(t):\n out.loc[loc, 'label'] = 0\n elif t == touches.loc[loc, 'stop_loss']:\n out.loc[loc, 'label'] = -1\n else:\n out.loc[loc, 'label'] = 1\n return out", "def var(\n self, values: pdarray, skipna: bool = True, ddof: int_scalars = 1\n ) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"var\", skipna, ddof)\n return k, cast(pdarray, v)", "def min(self, values: pdarray, skipna: bool = True) -> Tuple[groupable, pdarray]:\n if values.dtype == bool:\n raise TypeError(\"min is only supported for pdarrays of dtype float64, uint64, and int64\")\n k, v = self.aggregate(values, \"min\", skipna)\n return k, cast(pdarray, v)", "def toSingleLabel(tags, pos=0):\n # test format \n r = random.randint(0, len(tags)-1)\n\n # in multilabel 
format? each tag is in the form of [e1, e2, ...]\n isMultiLabel = True if hasattr(tags[r], '__iter__') else False\n \n labels = []\n if isMultiLabel: \n assert pos < len(tags[r]) and pos >= 0, \"Invalid index pos=%d\" % pos \n for i, tag in enumerate(tags): \n labels.append(tag[pos])\n else: \n # noop, already done \n print('TDocTag.toSingleLabels> Already in single-label format:\\n%s\\n' % tags[:10])\n labels = tags\n return labels", "def _extract_series_currencies(series: dict) -> tuple:\n return tuple(curr['@value'] for curr in series[\"generic:SeriesKey\"][\"generic:Value\"][1:3])", "def tag_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tag_value\")", "def get_first_level_tags(self) -> Any:\n return self.orthanc.get_instance_first_level_tags(self.identifier)", "def tag(self):\n \n tag = super(self.__class__, self).tag();\n tag = als.tag_join(tag, als.stra(self.strain));\n tag = als.tag_join(tag, als.stra(self.dtype));\n tag = als.tag_join(tag, 'w=%s' % als.stra(self.wid)); \n tag = als.tag_join(tag, 's=%s' % als.stra(self.stage));\n #tag = analysis.tag_join(tag, 'l=%s' % analysis.stra(self.label)); \n\n return tag;", "def find_tags_one_filter(self, tag_name, tag_confidence):\n tag_filter = {'tags.tag': tag_name, 'tags.confidence': tag_confidence}\n return self._collection_asset_tags.find(tag_filter)", "def tag_incident(incident):\n\ttry:\n\t\treturn re.findall(regex, incident)[0]\n\texcept TypeError:\n\t\treturn pd.np.nan\n\texcept IndexError:\n\t\treturn pd.np.nan", "def getTags(number=None):", "def independent_tags(self):\n if not RerankingParser._parser_model_loaded:\n raise ValueError(\"You need to have loaded a parser model in \"\n \"order to calculate most likely tags.\")\n return Tree(self.sentrep.makeFailureTree('X')).tags()", "def find_tag(reader, variant, extract_reader=None, window_size=100e3,\n maf_threshold=0.01, sample_normalization=True):\n\n genotypes = reader.get_variants_in_region(\n variant.chrom,\n variant.pos - (window_size // 2),\n variant.pos + (window_size // 2)\n )\n\n # Take the subset of genotypes that are also available in the other\n # genetic dataset if provided.\n if extract_reader is not None:\n genotypes = [\n i for i in genotypes\n if len(extract_reader.get_variant_genotypes(i.variant)) == 1\n ]\n\n # Filter suitable tags i.e. 
unambiguous and common enough.\n def _valid(g):\n return (\n (not g.variant.alleles_ambiguous() and g.maf() >= maf_threshold) or\n g.variant == variant\n )\n\n genotypes = [g for g in genotypes if _valid(g)]\n\n # There are no other variants in the region to be used as tags.\n if len(genotypes) < 2:\n return None\n\n # Find the index variant.\n idx = 0\n while idx < len(genotypes) - 1:\n if genotypes[idx].variant == variant:\n break\n\n else:\n idx += 1\n\n if genotypes[idx].variant != variant:\n logger.warning(\n \"Could not find tags for variant: {} (not in reference panel).\"\n \"\".format(variant)\n )\n return None\n\n # Compute the LD.\n r = geneparse.utils.compute_ld(genotypes[idx], genotypes, r2=False).values\n r[idx] = 0\n\n best_tag = np.argmax(r ** 2)\n\n return genotypes[idx], genotypes[best_tag], r[best_tag]", "def get_asg_tag(tags, tag_name):\n result = {}\n for tag in tags:\n for key, val in tag.items():\n if val == tag_name:\n result = tag\n return result", "def tags(self) -> Optional[Any]:\n return pulumi.get(self, \"tags\")", "def cypher_naTag_all(self, variable_tagNA=\"na_tag\"):\n query = f'({variable_tagNA}{self.label}'\n if self.keyword or self.synonyms is not None:\n query += \"{\"\n if self.keyword:\n query += f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\\'{self.keyword}\\','\n if self.synonyms:\n query += f'{self.databaseInfoTag[\"properties\"][\"synonyms\"]}:' + '[\\'' + '\\',\\''.join(self.synonyms) + '\\'],'\n query = query[:-1] + \"}\"\n return query + \")\"", "def pval_at_rna_by_nbinom(\n self, pos_dict_of_counts: Mapping[str, List], neg_vals_at_rna: np.array, gene_and_type,\n log_if_values_above=1E9,\n log_values=False, which='per_read',\n verbose=False):\n\n if len(neg_vals_at_rna) == 0:\n return None\n\n log_scale_high_value = (np.mean(neg_vals_at_rna) > log_if_values_above)\n\n if log_values or log_scale_high_value:\n log_this_gene = True\n neg_vals_at_rna = np.log10(neg_vals_at_rna)\n else:\n log_this_gene = False\n \n #if not np.any(neg_vals_at_rna):\n #print(\"No positive values in negatives.\")\n # neg_vals_at_rna = np.array([\n # self.negatives.lowest_positive_vals[which][x]/10 for x in \\\n # self.negatives.metadata.random_proteins])\n #print(f\"negatives now {neg_vals_at_rna}\")\n mean_negative = np.average(neg_vals_at_rna)\n std_negative = np.std(neg_vals_at_rna)\n\n vmr = (std_negative**2)/mean_negative\n\n verbose and print(f'vmr for negatives={vmr}')\n # Use a poisson if the var/mean is low enough:\n if vmr < 2:\n verbose and print(\"Using poisson.\")\n self.stats_log['vmr<2'] += 1\n pois = stats.poisson(mean_negative)\n return self.use_dist(pos_dict_of_counts, log_this_gene, pois)\n\n verbose and print(\"Wil try to use NB.\")\n self.stats_log['vmr>=2'] += 1\n\n # Try to fit a NB useing statsmodels.\n q = sm.NegativeBinomial(\n neg_vals_at_rna, np.array([1] * len(neg_vals_at_rna)), loglike_method='nb2')\n try:\n res = q.fit(disp=0)\n except: # If a NB can't be fit, revert to a poisson.\n print(f\"Could not run q.fit(disp=0) on neg_vals_at_rna= {neg_vals_at_rna}. Using poisson.\")\n pois = stats.poisson(mean_negative)\n return self.use_dist(pos_dict_of_counts, log_this_gene, pois)\n\n # Create a scipy.stats.nbinom object to use its cdf, based on the statsmodels fit parameters.\n # There is no cdf function for the statsmodels object.\n mu = res.predict()[0] # alpha = res.params[1]\n size = 1. 
/ res.params[1] # prob = size / (size + mu)\n\n verbose and print(f\"Fit NB mu={mu}\")\n \n pvals = self.use_dist(\n pos_dict_of_counts, log_this_gene, stats.nbinom(size, size/(size + mu)))\n\n return pvals", "def event_to_tag(self, event: Event, solution: Solution) -> Optional[Tag]:\n return solution.resolve(event)", "def _make_natural_type(self):\n for tag in self.tags:\n if self.tags[tag] is None or str(self.tags[tag]).strip() == \"\":\n self.tags[tag] = None\n else:\n if tag.lower() in VASP_TAG_INT_LIST:\n try:\n self.tags[tag] = int(self.tags[tag])\n except ValueError:\n raise IncarError(\"Could not convert '\" + tag + \"' : '\" + self.tags[tag] + \"' to int\")\n elif tag.lower() in VASP_TAG_FLOAT_LIST:\n try:\n self.tags[tag] = float(self.tags[tag].lower().replace('d','e'))\n except ValueError:\n raise IncarError(\"Could not convert '\" + tag + \"' : '\" + self.tags[tag] + \"' to float\")\n elif tag.lower() in VASP_TAG_BOOL_LIST:\n if not self.tags[tag].lower() in ['.true.','.false.']:\n raise IncarError(\"Could not find '\" + tag + \"' : '\" + self.tags[tag].lower() + \"' in ['.true.','.false.']\")\n else:\n self.tags[tag] = (self.tags[tag].lower() == '.true.')\n elif tag.lower() in VASP_TAG_SITEF_LIST + VASP_TAG_SPECF_LIST:\n temp = []\n for value in self.tags[tag].split():\n try:\n item=value.split('*')\n if len(item)==1:\n temp.append(float(value))\n else:\n if item[0] != 0:\n temp.append(str(item[0])+'*'+str(float(item[1])))\n except ValueError:\n raise IncarError(\"Could not convert '\" + tag + \"' : '\" + self.tags[tag] + \"' to float list\")\n self.tags[tag] = temp\n elif tag.lower() in VASP_TAG_SPECI_LIST:\n temp = []\n for value in self.tags[tag].split():\n try:\n temp.append(int(value))\n except ValueError:\n raise IncarError(\"Could not convert '\" + tag + \"' : '\" + self.tags[tag] + \"' to int list\")\n self.tags[tag] = temp\n elif tag.lower() in VASP_TAG_STRING_LIST:\n self._check_string_tag(tag,self.tags[tag])", "def tags(self) -> Optional[Mapping[str, Sequence[str]]]:\n return pulumi.get(self, \"tags\")", "def tag_data(df, tag, var='auto_tag'):\n return df[df[var].eq(tag)]", "def where_na_like(l):\n bool_index = np.array(map(lambda x: np.isinf(x) or \\\n pandas.isnull(x), l))\n return np.where(bool_index)[0]", "def _parse_trend(self, key, trend_list):\n return next(d for d in trend_list if key in d)[key]", "def return_wikipedia_term(res):\n rst = []\n if res['spotted']:\n for s in [s['spot'] for s in res['value']['spots']]:\n r = TagMeService.retrieve_taggings(s.encode('utf-8'), method='POST')\n if len(r['annotations']) != 0:\n for n in r['annotations']:\n if 'title' in n.keys():\n title = n['title'].replace(' ', '_') # strip whitespaces from dbpedia tag\n rst.append(title)\n else:\n print \"Cannot find title in annotations: \" + str(n)\n return rst", "def pos_tag(self, sentence):\n tags = []\n tokens = sentence.split(\" \")\n for i in range(len(tokens)):\n tags.append('')\n for i in range (len(tokens)):\n feat = []\n feat.append(self.features(tokens,tags,i))\n tag_predicted = self.postagger.predict(feat)[0]\n tags[i] = tag_predicted\n return tags", "def get(self, tag, index=None, default=None, parsed=False):\n\n # FIXME - move externally, use for get_all as well, and support numbers and dates\n def parse(column, value):\n if parsed:\n if column.has_attribute('list'):\n return re.split(r'\\s*,\\s*', value)\n else:\n return [value]\n return value\n\n if type(tag) is TagPattern:\n pattern = tag\n else:\n pattern = TagPattern.parse(tag)\n\n for i, column in 
enumerate(self.columns):\n if i >= len(self.values):\n break\n if pattern.match(column):\n if index is None:\n # None (the default) is a special case: it means look\n # for the first truthy value\n if self.values[i]:\n return parse(column, self.values[i])\n else:\n # Otherwise, look for a specific index\n if index == 0:\n return parse(column, self.values[i])\n else:\n index = index - 1\n return default", "def pos_tag(\n words: List[str], engine: str = \"perceptron\", corpus: str = \"orchid\"\n) -> List[Tuple[str, str]]:\n _corpus = corpus\n _tag = []\n if corpus == \"orchid_ud\":\n corpus = \"orchid\"\n if not words:\n return []\n\n if engine == \"perceptron\":\n from .perceptron import tag as tag_\n elif engine == \"artagger\":\n tag_ = _artagger_tag\n else: # default, use \"unigram\" (\"old\") engine\n from .unigram import tag as tag_\n _tag = tag_(words, corpus=corpus)\n\n if _corpus == \"orchid_ud\":\n _tag = _orchid_to_ud(_tag)\n\n return _tag", "def tag(text, pos_tagger):\n features = [get_crf_features([word for word in sent]) for sent in text]\n tags = pos_tagger.predict(features)\n tagged_text = []\n for i in range(len(text)):\n tagged_sent = []\n for j in range(len(text[i])):\n tagged_sent.append((text[i][j], tags[i][j]))\n tagged_text.append(tagged_sent)\n #print(tags)\n return tags, tagged_text", "def labels(self) -> pd.Series:\n return self.data.apply(to_label, axis=1)", "def _handleFoundValues(self, jvalues):\n # TODO figure out if we could be more cleaver in what values are combined\n value = None\n _set_op = True\n if self.local_value:\n jvalues.append((None, self.local_value))\n _log.debug(\"_handleFoundValues %s\" % str(jvalues))\n # Filter out deleted values\n jvalues = [v for v in jvalues if v[1] is not None]\n if len(jvalues) > 1:\n args = (self.node.long_id, str(jvalues))\n _log.debug(\"Got multiple values for key %i: %s\" % args)\n try:\n values = [(v[0], json.loads(v[1])) for v in jvalues]\n value_all = []\n for v in values:\n value_all = value_all + v[1]\n value = json.dumps(list(set(value_all)))\n except:\n # Not JSON coded or list, probably trying to do a get_concat on none set-op data\n # Do the normal thing\n _log.debug(\"_handleFoundValues ********\", exc_info=True)\n valueCounts = Counter([v[1] for v in jvalues])\n value = valueCounts.most_common(1)[0][0]\n _set_op = False\n else:\n try:\n key, value = jvalues[0]\n except:\n value = \"[]\" # JSON empty list\n\n peerToSaveTo = self.nearestWithoutValue.popleft()\n if peerToSaveTo is not None:\n _log.debug(\"nearestWithoutValue %d\" % (len(self.nearestWithoutValue)+1))\n if _set_op:\n d = self.protocol.callAppend(peerToSaveTo, self.node.id, value)\n else:\n d = self.protocol.callStore(peerToSaveTo, self.node.id, value)\n return d.addCallback(lambda _: value)\n # TODO if nearest does not contain the proper set push to it\n return value", "def tag_dict_values (self):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT keyword \"\r\n +\"FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n return self.tag_dict.values()", "def tags(self) -> Optional[Mapping[str, Any]]:\n return pulumi.get(self, \"tags\")", "def update(self, tree: \"Tree\") -> List[ValueObject]:\n new_values = set([])\n not_matched = set([])\n to_delete = set([])\n # Trees are lazy and need to be initialized before use.\n self.init()\n tree.init()\n # self.tree 
doesn't have labels -> there are no labels to query.\n if not self.tree and tree.vos:\n del self.vos[:]\n not_matched = range(len(tree.vos))\n else:\n # search_hits saves the intersection of all label matches.\n # The indices in the sets at the end are the search hits.\n search_hits = {ix: set([]) for ix in range(len(tree.vos))}\n for label in self.label_grid:\n if label in (\"_auto\",):\n continue\n if label in tree.tree and label in self.tree:\n # All label values that exist in both trees.\n for label_value in (\n tree.tree[label].keys() & self.tree[label].keys()\n ):\n for new_ix in tree.tree[label][label_value]:\n if new_ix in search_hits:\n if search_hits[new_ix]:\n search_hits[new_ix] &= self.tree[label][\n label_value\n ]\n else:\n search_hits[new_ix] |= self.tree[label][\n label_value\n ]\n # All label values in the new tree that are not in this tree.\n # Value objects that have a label value that is not included\n # in the current tree means that they will not be matched.\n for label_value in (\n tree.tree[label].keys() - self.tree[label].keys()\n ):\n for new_ix in tree.tree[label][label_value]:\n search_hits.pop(new_ix)\n not_matched.add(new_ix)\n elif label in self.tree:\n # All value objects with labels not specified in the other\n # tree are treated as search hits (for this label).\n unused_label = set.union(*self.tree[label].values())\n for new_ix in search_hits:\n if search_hits[new_ix]:\n search_hits[new_ix] &= unused_label\n else:\n search_hits[new_ix] |= unused_label\n elif label in tree.tree:\n raise ParamToolsError(\n f\"Label {label} was not defined in the defaults.\"\n )\n\n for ix, search_hit_ixs in search_hits.items():\n if search_hit_ixs:\n if tree.vos[ix][\"value\"] is not None:\n for search_hit_ix in search_hit_ixs:\n self.vos[search_hit_ix][\"value\"] = tree.vos[ix][\n \"value\"\n ]\n else:\n to_delete |= search_hit_ixs\n else:\n not_matched.add(ix)\n if to_delete:\n # Iterate in reverse so that indices point to the correct\n # value. 
If iterating ascending then the values will be shifted\n # towards the front of the list as items are removed.\n for ix in sorted(to_delete, reverse=True):\n del self.vos[ix]\n\n if not_matched:\n for ix in not_matched:\n if tree.vos[ix][\"value\"] is not None:\n self.vos.append(tree.vos[ix])\n new_values.add(len(self.vos) - 1)\n\n # It's faster to just re-build from scratch if values are deleted.\n if to_delete:\n self.new_values = None\n self.needs_build = True\n else:\n self.new_values = new_values\n self.needs_build = True\n\n return self.vos", "def get_tag_for_instance(self, instance_id, tag_key):\n tags = self.get_tags_for_instance(instance_id)\n for tag in tags:\n if tag['Key'] == tag_key:\n return tag['Value']\n return None", "def build_taglist(tags):\n taglist = []\n for tag in tags:\n taglist.append(tag['value'].lower())\n return taglist", "def tags(self) -> Optional[Sequence['outputs.ParameterGroupTag']]:\n return pulumi.get(self, \"tags\")", "def pos_tags(self):\n \n msg(\"Getting POS tag list...\")\n tags = []\n \n # loop through sentences\n for sent in self.tagged_sents:\n \n # loop through tagged words\n for (word, pos) in sent:\n \n # add tag if it's not already in list\n if pos not in tags:\n tags.append(pos)\n\n msg(\"done\\n\")\n \n return tags", "def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DomainTagArgs']]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DomainTagArgs']]]]:\n return pulumi.get(self, \"tags\")", "def verify_value_occurence_in_series(value, series):\n \n series_values_occurence = series.value_counts()\n if value in series_values_occurence:\n return series_values_occurence[value]", "def encontrarPosicionSiguiente(listaVecinosLibres):\r\n posicionLista=random.randint(0,len(listaVecinosLibres)-1)\r\n return listaVecinosLibres[posicionLista]", "def get_tags_for_NOx_HONO(AllTags=False):\n diags = [\n # Version 6 tags\n 'ProdHNO2fromHvNIT', 'ProdHNO2fromHvNITs', 'ProdHNO2fromHvNITD1',\n 'ProdHNO2fromHvNITD2', 'ProdHNO2fromHvNITD3', 'ProdHNO2fromHvNITD4',\n 'ProdNO2fromHvNIT', 'ProdNO2fromHvNITs', 'ProdNO2fromHvNITD1',\n 'ProdNO2fromHvNITD2', 'ProdNO2fromHvNITD3', 'ProdNO2fromHvNITD4',\n 'ProdNO2fromHONO', 'ProdHNO2fromOHandNO', 'ProdHNO2fromHET',\n 'ProdNOnHO2ChannelA', 'ProdNOnHO2ChannelB',\n # Version 7 tags\n 'ProdHNO3fromNO2nOH','ProdNO3fromHNO3nOH',\n 'PhotNO2', 'PhotHNO3', 'PhotHNO2',\n 'ProdHNO3fromHetNO3', 'ProdNITfromHetNO3','ProdNITsfromHetNO3',\n ]\n prefix = 'TN{:0>3}'\n tags = [prefix.format(i+1) for i in range(len(diags))]\n # pair up numbering (so that runs with different diagnostics have same #s)?\n d = dict(zip(diags, tags))\n # Include the automatic tagging of NOx\n def mk_KPP_tag_from_rxn_str(rxn_str=None, search_str=None,\n prefix='ProdfromRXN', ):\n \"\"\"\n Create a variable for reaction\n \"\"\"\n reactants = rxn_str.split('=')[0]\n reactants = reactants.replace(' + ', '_n_')\n reactants = reactants.replace(' {+M} ', '_M_').strip()\n products = rxn_str.split('=')[-1]\n products = products.replace(' + ', '_n_')\n products = products.replace(' {+M} ', '_M_').strip()\n products = products.replace(' {+M}', '_M').strip()\n products = products[:10]\n # Return a new reaction string\n return'{}_{}_{}_to_{}'.format(prefix, search_str, reactants, products)\n\n if AllTags:\n DataRoot = get_local_folder('DataRoot')\n folder = '{}{}'.format(DataRoot, '/ARNA/Misc/')\n# FName = 'Tagged_reactions_in_Standard_v12.9.1_ARNA_v8_POx_tagged.csv'\n FName = 
'Tagged_reactions_in_Standard_v12.9_ARNA_v9_PL_NOx_tagged.csv'\n df = pd.read_csv(folder+FName)\n# df['RxnName'] = df['rxn_str'].map(mk_KPP_tag_from_rxn_str)\n df['RxnName'] = df.apply(lambda x:\n mk_KPP_tag_from_rxn_str(rxn_str=x['rxn_str'],\n search_str = x['search_str'], ),\n axis=1)\n\n # combine into main dictionary\n d2 = dict(zip( df['RxnName'], df['tag'].values ) )\n d = AC.merge_two_dicts(d, d2)\n return d", "def get_morphology(self, tok, pos, morph):\n if pos[0] == 'N' or pos[0] == 'V':\n if morph:\n return [tok, morph]\n return [tok]\n key = tok + '_' + pos\n if key not in self.lookup_table:\n wntag = self.WN_TAGS.get(pos[0])\n if wntag:\n self.lookup_table[key] = [self.wnl.lemmatize(tok, wntag)]\n else:\n self.lookup_table[key] = [tok]\n return self.lookup_table[key]", "def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResolverRuleTagArgs']]]]:\n return pulumi.get(self, \"tags\")", "def sensor_value(self) -> Optional[str]:\n auth_resp = requests.post(\n \"{0}/auth\".format(self.API_URL),\n json={\n \"Username\": self.config[\"portainer_username\"],\n \"Password\": self.config[\"portainer_password\"],\n },\n ).json()\n token = auth_resp[\"jwt\"]\n\n images_resp = requests.get(\n \"{0}/endpoints/{1}/docker/images/json\".format(\n self.API_URL, self.properties[CONF_ENDPOINT_ID]\n ),\n headers={\"Authorization\": \"Bearer {0}\".format(token)},\n ).json()\n\n try:\n tagged_image = next(\n (\n i\n for image in images_resp\n for i in image[\"RepoTags\"]\n if self.properties[CONF_IMAGE_NAME] in i\n )\n )\n except StopIteration:\n self.error(\n \"No match for image: {0}\".format(self.properties[CONF_IMAGE_NAME])\n )\n\n return tagged_image.split(\":\")[1].replace(\"v\", \"\").split(\"-\")[0]", "def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DataSetTagArgs']]]]:\n return pulumi.get(self, \"tags\")", "def get_taxon(self, **kwargs):\n if \"oid\" not in kwargs and \"label\" not in kwargs:\n raise TypeError(\"Need to specify Taxon oid or label.\")\n oid = kwargs.get(\"oid\", None)\n label = kwargs.get(\"label\", None)\n ci = kwargs.get(\"case_insensitive\", False)\n if ci:\n label_lower = label.lower()\n for taxon in self:\n if (oid is not None and taxon.oid == oid) \\\n or (label is not None and taxon.label == label) \\\n or (ci and label is not None and label_lower == taxon.label.lower()):\n return taxon\n return None", "def __getitem__(self, tag):\n return self.__tags.get(tag.lower(), 0)", "def _value_token_index(self):\n # TODO: memoize this value\n for i, token in enumerate(self.tokens):\n if not token.type.is_metadata:\n return i\n raise RuntimeError('could not find a value token')", "def get_positive_pair(self, index):\n video_name = self.videos[index]\n video = self.labels[video_name]\n track = np.random.choice(list(video.keys()))\n track_info = video[track]\n\n frames = track_info['frames']\n template_frame = np.random.randint(0, len(frames))\n left = max(template_frame - self.frame_range, 0)\n right = min(template_frame + self.frame_range, len(frames)-1) + 1\n search_range = frames[left:right]\n template_frame = frames[template_frame]\n search_frame = np.random.choice(search_range)\n return self.get_image_anno(video_name, track, template_frame), \\\n self.get_image_anno(video_name, track, search_frame)", "def create_parse_vso_values():\n here = os.path.dirname(os.path.realpath(__file__))\n\n # Keywords we are after\n keywords = [\"+detector\", \"+instrument\", \"+source\", \"+provider\", \"+physobs\", \"+level\"]\n # Construct and format the request\n 
keyword_info = {}\n url = \"https://vso1.nascom.nasa.gov/cgi-bin/registry_json.cgi\"\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n for keyword in keywords:\n data = urlencode({'fields': f\"['{keyword}']\".replace(\"'\", '\"')}).encode('ascii')\n req = Request(url=url, data=data, headers=headers)\n response = urlopen(req)\n keyword_info[keyword.replace(\"+\", \"\")] = json.loads(response.read())\n\n # Now to traverse the return and create attrs out of them.\n attrs = {}\n for key, value in keyword_info.items():\n attrs[key] = []\n for item in value:\n if item:\n if key == \"level\":\n attrs[key].append((str(item[key]), str(item[key])))\n else:\n attrs[key].append((str(item[key]), str(item[key+\"_long\"])))\n\n with open(os.path.join(here, 'data', 'attrs.json'), 'w') as attrs_file:\n json.dump(dict(sorted(attrs.items())), attrs_file, indent=2)", "def findTag(bufferNumber, changedTick):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # try to find the best tag {{{\n try:\n # get the tags data for the current buffer\n tagLineNumbers, tags = getTags(bufferNumber, changedTick)\n\n # link to vim's internal data {{{\n currentBuffer = vim.current.buffer\n currentWindow = vim.current.window\n row, col = currentWindow.cursor\n # }}}\n\n # get the index of the nearest line\n nearestLineIndex = getNearestLineIndex(row, tagLineNumbers)\n\n # if any line was found, try to find if the tag is appropriate {{{\n # (ie. the cursor can be below the last tag but on a code that has nothing\n # to do with the tag, because it's indented differently, in such case no\n # appropriate tag has been found.)\n while (nearestLineIndex > -1):\n # get the line number of the nearest tag\n nearestLineNumber = tagLineNumbers[nearestLineIndex]\n\n # walk through all the lines in range (nearestTagLine, cursorRow) {{{\n for lineNumber in range(nearestLineNumber + 1, row):\n # get the current line\n line = currentBuffer[lineNumber]\n\n # count the indentation of the line, if it's lower than the tag's, the tag is invalid {{{\n if (len(line)):\n # initialize local auxiliary variables {{{\n lineStart = 0\n i = 0\n # }}}\n\n # compute the indentation of the line {{{\n while ((i < len(line)) and (line[i].isspace())):\n # move the start of the line code {{{\n if (line[i] == '\\t'):\n lineStart += SimplePythonTagsParser.TABSIZE\n else:\n lineStart += 1\n # }}}\n\n # go to the next character on the line\n i += 1\n # }}}\n\n # if the line contains only spaces, skip it {{{\n if (i == len(line)):\n continue\n # }}}\n # if the next character is a '#' (python comment), skip the line {{{\n if (line[i] == '#'):\n continue\n # }}}\n # if the next character is a ')', skip the line {{{\n # this is so that the following style works correctly:\n #\n # def foo(\n # args,\n # ):\n # pass\n if (line[i] == ')'):\n continue\n # }}}\n\n # if the line's indentation starts before or at the nearest tag's one, the tag is invalid {{{\n if (lineStart <= tags[nearestLineNumber].indentLevel):\n nearestLineIndex -= 1\n break\n # }}}\n # }}}\n # }}}\n # the tag is appropriate, so use it {{{\n else:\n break\n # }}}\n # }}}\n # no appropriate tag has been found {{{\n else:\n nearestLineNumber = -1\n # }}}\n\n # describe the cursor position (what tag the cursor is on) {{{\n # reset the description\n tagDescription = \"\"\n\n # if an appropriate tag has been found, set the description accordingly {{{\n if (nearestLineNumber > -1):\n tagInfo = tags[nearestLineNumber]\n tagDescription = \"[%s]\" % (tagInfo.fullName, ) # not using 
PythonTag.TAG_TYPE_NAME[tagInfo.type] because ENOSPC\n # }}}\n # }}}\n\n # update the variable for the status line so it get updated with the new description\n vim.command(\"let w:PHStatusLine=\\\"%s\\\"\" % (tagDescription,))\n # }}}\n\n # handle possible exceptions {{{\n except Exception:\n # bury into the traceback {{{\n ec, ei, tb = sys.exc_info()\n while (tb != None):\n if (tb.tb_next == None):\n break\n tb = tb.tb_next\n # }}}\n\n # spit out the error {{{\n print(\"ERROR: %s %s %s:%u\" % (ec.__name__, ei, tb.tb_frame.f_code.co_filename, tb.tb_lineno,))\n time.sleep(0.5)\n # }}}\n # }}}\n # }}}", "def __check_single_sign_value(series, log=False):\n # gets useful values\n negative_values_unique, positive_values_unique = set(series[series < 0]), \\\n set(series[series > 0])\n if len(negative_values_unique) == 1 and len(positive_values_unique) > 1:\n series = series.replace(to_replace=list(negative_values_unique), value=np.nan)\n elif len(positive_values_unique) == 1 and len(negative_values_unique) > 1:\n series = series.replace(to_replace=list(positive_values_unique), value=np.nan)\n\n return series", "def tag_word(self, w): \n if self.unknown(w):\n return self.default_tag\n else:\n return max(self.word_tags[w], key=self.word_tags[w].get)", "def tags(self) -> pulumi.Output[Optional[Sequence['outputs.DomainTag']]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> pulumi.Output[Optional[Sequence['outputs.DomainTag']]]:\n return pulumi.get(self, \"tags\")", "def convertVBtoJJ(POS_tag, vb_docs):\n wanted_POS = ['VBN', 'VBD','VBG']\n for i, word in enumerate(POS_tag):\n if word[1] in wanted_POS:\n if vb_docs.loc[vb_docs[word[1]] == word[0], 'JJ'] is not None:\n sub_vb = vb_docs.loc[vb_docs[word[1]] == word[0], 'JJ']\n if len(sub_vb) > 0:\n POS_tag[i] = (sub_vb.get_values()[0], 'JJ')\n return POS_tag", "def get_tag_index(tags, tag_to_search):\n counter = 0\n for t in tags:\n if tag_to_search == t:\n break\n else:\n counter+=1\n return counter" ]
[ "0.61236465", "0.5222297", "0.52140766", "0.49421668", "0.49218923", "0.49208298", "0.47892955", "0.47874418", "0.47012714", "0.46522093", "0.4625487", "0.46219954", "0.45822042", "0.45629826", "0.45510745", "0.45039132", "0.44938043", "0.44818074", "0.44803494", "0.44793394", "0.44693768", "0.4417272", "0.44152057", "0.44027513", "0.43906337", "0.43906015", "0.4380242", "0.4373906", "0.43715787", "0.43593135", "0.43590173", "0.43544593", "0.4349198", "0.43362835", "0.43334305", "0.43060845", "0.42921114", "0.4264981", "0.42575353", "0.42575353", "0.42435956", "0.42431986", "0.4234368", "0.42324245", "0.4209749", "0.41975963", "0.41771224", "0.41727298", "0.4170804", "0.41606596", "0.41598308", "0.41596425", "0.41500786", "0.4145904", "0.4135379", "0.41330647", "0.4129889", "0.41281307", "0.4116937", "0.4109789", "0.41047612", "0.41027558", "0.40989643", "0.40985245", "0.40755725", "0.40750074", "0.407193", "0.40716183", "0.40659758", "0.40607512", "0.40536797", "0.40424737", "0.40378565", "0.4028997", "0.4025966", "0.4024546", "0.4024164", "0.40186396", "0.4012659", "0.4012473", "0.4012473", "0.4004348", "0.39947498", "0.39925674", "0.3985549", "0.39831275", "0.39795747", "0.39782616", "0.39730954", "0.39728573", "0.39711812", "0.39704993", "0.39696905", "0.39614353", "0.39609712", "0.39607677", "0.39559162", "0.39559162", "0.39532295", "0.39524934" ]
0.7129057
0
prepro 200x235x3 uint8 frame into 8300 (83x100) 1D float vector
def prepro(I): # """ prepro 200x235x3 uint8 frame into 10000 (100x100) 1D float vector """ I = I[35:200] # crop - remove 35px from start & 35px from end of image in x, to reduce redundant parts of image (i.e. after ball passes paddle) I = I[::2,::2,0] # downsample by factor of 2 I[I == 43] = 0 # erase background (background type 1) I[I != 0] = 1 # everything else (paddles, ball) just set to 1 return I.astype(np.float).ravel()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess(self, frame: np.ndarray) -> torch.TensorType:\n tensor = cv.resize(frame, (self.IMGSZ, self.IMGSZ)) \n tensor = tensor.transpose(2, 0, 1)\n tensor = torch.from_numpy(tensor)\n tensor = torch.unsqueeze(tensor, 0)\n tensor = tensor.half() if self.half else tensor.float()\n tensor = tensor / 255.0\n tensor = tensor.to(self.device)\n\n return tensor", "def processing_data(raw_data):\n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data", "def _decode_35708(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29974:\n start_byte += n_bytes\n n_bytes = 4\n var_size = struct.unpack('<I', data[start_byte:\n start_byte + n_bytes])[0]\n start_byte += n_bytes\n n_bytes = var_size\n\n return np.frombuffer(data[start_byte:start_byte + n_bytes],\n dtype=np.float64)", "def normalize_frames(frames):\n new_frames = frames.astype(np.float32)\n new_frames /= (255 / 2)\n new_frames -= 1\n\n return new_frames", "def vid2tensor( self, current_frame):", "def uint8_to_float(im: np.array):\n if im.dtype == np.float32:\n warnings.warn(\"Image is already np.float32\")\n return im\n im = im.astype(np.float32) / 255\n return im", "def uint8_to_float(im: np.array):\n if im.dtype == np.float32:\n warnings.warn(\"Image is already np.float32\")\n return im\n im = im.astype(np.float32) / 255\n return im", "def denormalize_frames(frames):\n new_frames = frames + 1\n new_frames *= (255 / 2)\n # noinspection PyUnresolvedReferences\n new_frames = new_frames.astype(np.uint8)\n\n return new_frames", "def pre_handler(frame):\n img_data, _im0 = preprocess(frame, IMAGE_HEIGHT, IMAGE_WIDTH, False)\n return kdp_wrapper.convert_float_to_rgba(img_data, 8, 520, True)", "def convert_to_vector(img_arr):\n img = img_arr[0:248, 0:248, 0]\n img = img.flatten()\n return img", "def to_uint8(f):\n from numpy import array, clip, uint8\n\n img = array(clip(f,0,255),uint8)\n return img", "def _image_to_vector(image):\n return image.flatten().astype(float)", "def _process_data(data: np.ndarray) -> np.ndarray:\r\n result: np.ndarray = np.empty(shape=(0, 0))\r\n i = 0\r\n while i < (len(data) - 1):\r\n # Found beginning of frame\r\n if data[i] > 127:\r\n # Extract one sample from 2 bytes\r\n intout = (np.bitwise_and(data[i], 127)) * 128\r\n i += 1\r\n intout = intout + data[i]\r\n result = np.append(result, intout)\r\n i += 1\r\n return result", "def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))", "def preprocess(image):\n image = rgb2yuv(image)\n return image", "def decode_frame(self, buf):\n import numpy as np\n from cv2 import cvtColor\n\n w, h = self._resolution\n arr = np.fromstring(buf, 'uint8').reshape((h + h / 2, w))\n arr = cvtColor(arr, 93) # NV21 -> BGR\n return arr", "def _convert_frame_data(jpeg_data):\n decoded_frames = tf.image.decode_jpeg(jpeg_data)\n return tf.image.convert_image_dtype(decoded_frames, dtype=tf.float32)", "def Motion_estimate_reverse_1frame(ref0_frame,ref1_frame,P_frame,block_size):\n \n nb_blocks = width//block_size*height//block_size\n \n vect_field = np.array(P_frame[:nb_blocks*3])\n vect_field = 
vect_field.reshape((height//block_size,width//block_size,3))\n \n frame_error = DCT_inverse(np.array(P_frame[nb_blocks*3:]),offset=0)\n tar_Y = frame_error[ :sep1].reshape(height,width)\n tar_U = frame_error[sep1:sep2].reshape(height//2,width//2)\n tar_V = frame_error[sep2: ].reshape(height//2,width//2)\n \n ref_frame = [ref0_frame,ref1_frame]\n \n for X in range(0,height//block_size):\n for Y in range(0,width//block_size):\n xa, xz = X*block_size,(X+1)*block_size\n ya, yz = Y*block_size,(Y+1)*block_size\n \n ref,vx,vy = vect_field[X,Y,:]\n \n pxa, pxz = xa+vx,xz+vx\n pya, pyz = ya+vy,yz+vy\n \n patch_Y = ref_Y[ref][pxa:pxz,pya:pyz]\n patch_U = ref_U[ref][pxa//2:pxz//2,pya//2:pyz//2]\n patch_V = ref_V[ref][pxa//2:pxz//2,pya//2:pyz//2]\n \n tar_Y[xa:xz,ya:yz] += patch_Y\n tar_U[xa//2:xz//2,ya//2:yz//2] += patch_U\n tar_V[xa//2:xz//2,ya//2:yz//2] += patch_V\n\n target_frame = np.concatenate((tar_Y.flatten(),\n tar_U.flatten(),\n tar_V.flatten()))\n return target_frame", "def preprocess(self, data):\n (w,h,f) = self.rawinputformat()\n dt = numpy.dtype(numpy.uint8)\n nb = numpy.frombuffer(data,dt,-1,0)\n actual_stream_width = (w&1)+w # hack, rather get this from the app sink\n if(actual_stream_width != self.reqsize):\n nb = nb.reshape(h,actual_stream_width,3)\n nb = nb[0:h,0:w,0:3] # crop to network input size\n else:\n nb = nb.reshape((actual_stream_width,actual_stream_width,3))\n img = nb.astype('float32')\n #Preprocess image\n #for i in range(3):\n # img[:,:,i] = (img[:,:,i] - self.mean[i]) * self.std[i]\n #img = resize(img/255.0,(w,h),1)\n img = img/255.0\n print(img.shape)\n #print(img[0,0,:])\n return img.astype(numpy.float16)", "def to_data(x):\n if torch.cuda.is_available():\n x = x.cpu()\n x = x.data.numpy()\n x = ((x +1)*255 / (2)).astype(np.uint8) # rescale to 0-255\n return x", "def get_signal_gwgds1072au(a_signal_packed: bytes, a_scale : float ) -> list:\n the_return = None\n the_signal_packed=a_signal_packed\n the_scale=a_scale\n the_signal_sequence=[]\n the_signal=0.0 #TODO reminder check this before allowing it\n the_info=[]\n n=4\n bla=0\n blb=bla+n\n print(the_signal_packed)\n JX=unpack('>%sh' % 2 ,the_signal_packed[bla:blb])\n for ii in range(0,2003):\n the_info.append(unpack('>%sh' % 2 ,the_signal_packed[bla:blb])[0])\n bla=bla+n\n blb=blb+n\n #TODO get the potential scale\n #TODO get the offset\n #TODO get the time scale\n\n return the_info", "def preprocess_frame(self, frame):\n state = torch.Tensor(frame)\n return gpuify(state, self.gpu_id)", "def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack(\">HHL\", value)\n exponent = (short1 & 0x7F00) // 256 - 64\n mantissa = (\n ((short1 & 0x00FF) * 65536 + short2) * 4294967296 + long3\n ) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.0 ** exponent\n return mantissa * 16.0 ** exponent", "def vector_convert(self, pos) :\n delta = AUTO_width2//3\n bright_list = []\n for i in range(-1,2):\n for j in range(-1,2) :\n b = 0\n count = 0\n for x in range(max(0, pos[0] + i*delta), min(self.m_x, pos[0] + (i+1)*delta)):\n for y in range(max(0, pos[1] + j*delta), min(self.m_y, pos[1] + (j+1)*delta)):\n b += self.current_array[x][y]\n count += 1\n if count == 0 :\n bright_list.append(0)\n else :\n if b == 0 : #prevent 0 divde\n b = 1\n bright_list.append(b/count)\n bright_list = np.array(bright_list)\n m = np.max(bright_list)/self.current_total_avg\n bright_list = bright_list/np.min(bright_list) -1\n bright_list = np.append(bright_list, m)\n return bright_list", "def 
_eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack('>HHL', value)\n exponent = (short1 & 0x7f00) // 256 - 64\n mantissa = (((short1 & 0x00ff) * 65536 + short2) * 4294967296 +\n long3) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.**exponent\n return mantissa * 16.**exponent", "def snd_rcv(self, u):\n self._soc.send(struct.pack(self._u_fmt, *u))\n data = self._soc.recv(self._buf_size)\n return np.array(struct.unpack(self._x_fmt, data), dtype=np.float32)", "def grabRawFrame(self):\r\n \r\n self.surface = self.capture.get_image(self.surface)\r\n width, height = self.surface.get_size()\r\n return pygame.image.tostring(self.surface, 'RGB'), width, height, 1", "def preprocess(x):\n if x.shape[-1] < 16000 * 8:\n raise ValueError(\n \"Cannot preprocess tensor less than 8 seconds in duration.\"\n )\n vad = VadChunk(*get_vad(\"both\"))\n return vad(x)", "def float32_to_uint8(inputs):\n return np.uint8(np.clip(np.round(inputs * 255), 0, 255))", "def convert_to_continuos_f0(f0):\n # get uv information as binary\n uv = np.float32(f0 != 0)\n\n # get start and end of f0\n if (f0 == 0).all():\n logging.warning(\"all of the f0 values are 0.\")\n return uv, f0\n start_f0 = f0[f0 != 0][0]\n end_f0 = f0[f0 != 0][-1]\n\n # padding start and end of f0 sequence\n start_idx = np.where(f0 == start_f0)[0][0]\n end_idx = np.where(f0 == end_f0)[0][-1]\n f0[:start_idx] = start_f0\n f0[end_idx:] = end_f0\n\n # get non-zero frame index\n nz_frames = np.where(f0 != 0)[0]\n\n # perform linear interpolation\n f = interp1d(nz_frames, f0[nz_frames])\n cont_f0 = f(np.arange(0, f0.shape[0]))\n\n return uv, cont_f0", "def postprocess_img(img):\n img = img.transpose((1, 2, 0))\n img += 1.0\n img = (img * 128.0).astype(np.uint8)\n return img", "def snapshot2(self) -> np.array:\n fbo = self.fbo\n data = fbo.read(components=3, dtype='f4')\n w, h = self.size\n return np.flipud(np.frombuffer(data, dtype='f4').reshape((h, w, 3)))", "def normalize_image(img_arr_uint):\n return img_arr_uint.astype(np.float64) * ONE_BYTE_SCALE", "def read_pts(data: bytearray, offset: int) -> float:\n a = data[offset] & 0x0E\n b = data[offset + 1] & 0xFF\n c = data[offset + 2] & 0xFF\n d = data[offset + 3] & 0xFF\n e = data[offset + 4] & 0xFF\n\n return (((a & 0x0E) << 29) |\n (((((b << 8) | c) & 0xFFFF) >> 1) << 15) |\n ((((d << 8) | e) & 0xFFFF) >> 1)) / 90", "def bytes_to_yuv(data, resolution):\n width, height = resolution\n fwidth, fheight = raw_resolution(resolution)\n y_len = fwidth * fheight\n uv_len = (fwidth // 2) * (fheight // 2)\n if len(data) != (y_len + 2 * uv_len):\n raise PiCameraValueError(\n 'Incorrect buffer length for resolution %dx%d' % (width, height))\n # Separate out the Y, U, and V values from the array\n a = np.frombuffer(data, dtype=np.uint8)\n Y = a[:y_len].reshape((fheight, fwidth))\n Uq = a[y_len:-uv_len].reshape((fheight // 2, fwidth // 2))\n Vq = a[-uv_len:].reshape((fheight // 2, fwidth // 2))\n # Reshape the values into two dimensions, and double the size of the\n # U and V values (which only have quarter resolution in YUV4:2:0)\n U = np.empty_like(Y)\n V = np.empty_like(Y)\n U[0::2, 0::2] = Uq\n U[0::2, 1::2] = Uq\n U[1::2, 0::2] = Uq\n U[1::2, 1::2] = Uq\n V[0::2, 0::2] = Vq\n V[0::2, 1::2] = Vq\n V[1::2, 0::2] = Vq\n V[1::2, 1::2] = Vq\n # Stack the channels together and crop to the actual resolution\n return np.dstack((Y, U, V))[:height, :width]", "def simple_unet_data():\n return tf.constant(value=1.0, shape=(1, 256, 256, 1))", "def de_project(np_arr):\n item = (np_arr 
+1)*255 / 2\n return item.astype(np.int32, copy=True)", "def PreProcessing(image):\n\timage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\timage = cv2.resize(image, (300, 300))\n\t# type conversion to UINT8\n\timage = image.astype(np.uint8).copy()\n\treturn image", "def calculate_frame(self):\n frame = self.stream.read()\n self.keypoints, self.image = self.openpose.forward(frame, True)", "def _prepare_frame(self, frame):\n\n initial_h, initial_w = frame.shape[:2]\n scale_h, scale_w = initial_h / float(self.input_height), initial_w / float(self.input_width)\n\n in_frame = cv2.resize(frame, (self.input_width, self.input_height))\n in_frame = in_frame.transpose((2, 0, 1))\n in_frame = in_frame.reshape(self.input_size)\n\n return in_frame, scale_h, scale_w", "def receptive_field(self):\n frames = 0\n for f in self.pad:\n frames += f\n return 1 + 2 * frames", "def receptive_field(self):\n frames = 0\n for f in self.pad:\n frames += f\n return 1 + 2 * frames", "def pipeRAFT(RAFTframe, offset=128, magFactor=2):\r\n return (RAFTframe.astype(np.float16)-offset)/magFactor", "def _read_uint12_video_prec(data, shape):\n data = np.memmap(data, dtype=np.uint8, mode=\"r\")\n return nb_read_uint12(data).reshape(shape)", "def one_2_uint8(one_arr):\n assert (one_arr.dtype == 'float' and np.max(one_arr <= 1.0)), \\\n 'improc.one_2_uint8() only accepts floats arrays from 0 to 1.'\n return (255*one_arr).astype('uint8')", "def preprocess_3d(im_stack):\n im_stack /= 127.5\n im_stack -= 1.0\n return im_stack", "def _calculate_float(byte_array):\n\tif len(byte_array) != 4:\n\t\treturn None\n\n\t'''\n\tmsg_prefix = \"[_calculate_float] \"\n\tprint(f\"{msg_prefix}byte_array = {[hex(b) for b in byte_array]}\")\n\t\n\t# if OPC_BIT_ORDER == MB_BIT_ORDER:\n\tpack_fstr = '4B'\n\tprint(f\" --> Using '{pack_fstr}' as pack_str: f = {round(struct.unpack('f', struct.pack(pack_fstr, *byte_array))[0], 5)}\")\n\t# else:\n\t# \tif OPC_BIT_ORDER == LSBFIRST: ## Little endian\n\tpack_fstr = '<4B'\n\tprint(f\" --> Using '{pack_fstr}' as pack_str: f = {round(struct.unpack('f', struct.pack(pack_fstr, *byte_array))[0], 5)}\")\n\t\t# else: \t## Big endian\n\tpack_fstr = '>4B'\n\tprint(f\" --> Using '{pack_fstr}' as pack_str: f = {round(struct.unpack('f', struct.pack(pack_fstr, *byte_array))[0], 5)}\")\n\t'''\n\n\tf = struct.unpack('f', struct.pack('4B', *byte_array))[0]\n\t# f = struct.unpack('f', struct.pack(pack_fstr, *byte_array))[0]\n\treturn round(f, 5)", "def unpack_mraw_frame_10bit(file,n_pixels,start_frame=0):\n \n start_byte = start_frame*n_pixels*10/8\n file.seek(start_byte)\n image = []\n \n n_bytes = n_pixels*10/8\n \n int_array = np.fromfile(file,count=n_bytes,dtype=np.uint8)\n \n bytes_1 = int_array[::5]\n bytes_2 = int_array[1::5] \n bytes_3 = int_array[2::5]\n bytes_4 = int_array[3::5] \n bytes_5 = int_array[4::5]\n\n \n # Here 4 pixels from the image are shared between 5 bytes of data like\n #\n # | byte 1 | byte 2 | byte 3 | byte 4 | byte 5 |\n # |o o o o o o o o | o o | o o o o o o | o o o o | o o o o | o o o o o o | o o | o o o o o o o o|\n # | Pixel 1 | Pixel 2 | Pixel 3 | Pixel 4 |\n #\n # byte 2 is shared between pixel and we need only the right-most bits for pixel 2 and\n # only the left most bits for pixel 1. 
\n \n # right-most bits of byte 2 = Most significant bits of Pixel 2\n # left-most bits of byte 2 = Least significant bits of Pixel 1\n \n pix_1 = np.array(4.0*bytes_1 + np.right_shift(bytes_2,6),dtype=np.uint16)\n pix_2 = np.array(16.0*np.bitwise_and(bytes_2,0b111111) + np.right_shift(bytes_3,4),dtype=np.uint16)\n pix_3 = np.array(64.0*np.bitwise_and(bytes_3,0b1111) + np.right_shift(bytes_4,2),dtype=np.uint16)\n pix_4 = np.array(256.0*np.bitwise_and(bytes_4,0b11) + bytes_5,dtype=np.uint16)\n #try:\n image = (np.dstack([pix_1,pix_2,pix_3,pix_4])).reshape((1,n_pixels))[0]\n #except:\n # image = np.zeros(n_pixels)\n return image", "def convert_lf(self, lf):\n lf = np.float32(lf)\n if np.max(lf) > 1:\n lf = lf/256\n lf = np.uint8(lf*256)\n \n if lf.shape[-1] > 3:\n lf[lf[:,:,:,:,3] == 0] = (255,255,255,0) #convert alpha to white. \n lf = lf[:,:,:,:,:3]\n # while lf.ndim < 6:\n # lf = np.expand_dims(lf, 0)\n \n lf = resize_lightfield(lf, (self.width, self.height)) \n return lf", "def morph_frame(mid0, mid1, dissolve_frac):\n frame = mid0.astype(np.float32) * (1 - dissolve_frac) + mid1.astype(np.float32) * dissolve_frac\n return frame.astype(np.uint8)", "def yuv_bytes(self):\n r, g, b = self.rgb_bytes\n return (\n (( 66 * r + 129 * g + 25 * b + 128) >> 8) + 16,\n ((-38 * r - 73 * g + 112 * b + 128) >> 8) + 128,\n ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128,\n )", "def make_frame(psfs):\n # Empty frame\n frame = np.zeros((256, 2124))\n\n # Add each psf\n for n, psf in enumerate(psfs):\n frame[:, n:n+76] += psf\n\n return frame[:, 38:-38]", "def _shorts2float(lo_byte_pair, hi_byte_pair):\n\tba = bytearray(struct.pack(\"HH\", lo_byte_pair, hi_byte_pair))\n\t[f] = struct.unpack('f', ba)\n\treturn f", "def binconv(fp, fp_len):\n vec = [1] * fp_len\n #print(fp, len(fp))\n for indx, b in enumerate(fp):\n if indx == 224:\n break\n if b == '0':\n vec[indx] = -1\n return vec", "def raw_to_tif(file, channel=None ):\n \n def read_uint12(data_chunk):\n data = np.frombuffer(data_chunk, dtype=np.uint8)\n fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n # fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n # snd_uint12 = (lst_uint8 << 4) + (np.bitwise_and(15, mid_uint8))\n fst_uint12 = (fst_uint8 << 4) + (np.bitwise_and(15, mid_uint8))\n snd_uint12 = (lst_uint8 << 4) + (mid_uint8 >> 4)\n return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n\n# def read_uint12(data_chunk):\n# data = np.frombuffer(data_chunk, dtype=np.uint8)\n# fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n# fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n# snd_uint12 = ((mid_uint8 % 16) << 8) + lst_uint8\n# return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n\n# def read_uint12(data_chunk):\n# data = np.frombuffer(data_chunk, dtype=np.uint8)\n# fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n# fst_uint12 = ((mid_uint8 & 0x0F) << 8) | fst_uint8\n# snd_uint12 = (lst_uint8 << 4) | ((mid_uint8 & 0xF0) >> 4)\n# return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n \n # infile = 'd:\\\\Projekti\\\\Satelit\\\\CO\\\\Razpis\\\\Flat field images_new2020\\\\flatfield\\\\NHDBflat_1D'\n # infile = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Jure_naloga_banje_raw_pyt\\\\NHDRGoreMorje_3D'\n\n # in_path = 
'p:\\\\NEMO\\Posnetki\\\\20201014_GoreMorje_data\\cele\\\\'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_image_files = [filename for filename in os.listdir(in_path) if filename.lower().startswith(\"nhd\") and filename.lower().endswith(\"d\")]\n\n \n # infile = in_path + in_image_files[i]\n with open(file, 'rb', buffering=10) as f: # problem pri branju podatkov?\n byte = f.read()\n print(file)\n # # ar = open(infile, 'rb')\n # buffer = BytesIO()\n # byte = BytesIO(ar)\n \n img = read_uint12(byte)\n print(img)\n \n if channel==\"P\":\n img = img.reshape((2748, 3664)) # PAN\n else:\n img = img.reshape((2050, 2448)) # MS\n # img = img.reshape((2748, 3664)) # PAN\n\n size = img.shape\n \n \n out = file[:-4]+ \"_py.tif\"\n\n driver = gdal.GetDriverByName('GTiff')\n\n outRaster = driver.Create(out, size[1], size[0], 1, gdal.GDT_UInt16)\n\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(img)\n outband.FlushCache()", "def test_f8_xf16_roundtrip(in_dtype, out_dtype):\n check_type_supported(out_dtype)\n\n @triton.jit\n def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):\n offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)\n mask = offsets < n_elements\n input = tl.load(input_ptr + offsets, mask=mask)\n output = input\n tl.store(output_ptr + offsets, output, mask=mask)\n\n f8_tensor = torch.tensor(range(-128, 128), dtype=torch.int8, device='cuda')\n # f32_to_f8 doesn't handle nan, so we make sure f8_tensor doesn't contain any nan\n all_exp_ones = (f8_tensor & 0b01111100) == 128 - 2**in_dtype.fp_mantissa_width\n f8_tensor[all_exp_ones] = 0\n f8 = triton.reinterpret(f8_tensor, in_dtype)\n n_elements = f8_tensor.numel()\n xf16 = torch.empty_like(f8_tensor, dtype=out_dtype)\n grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)\n copy_kernel[grid](f8, xf16, n_elements, BLOCK_SIZE=1024)\n\n # exponent_mask = 0b01111100 for float8e5\n # exponent_mask = 0b01111000 for float8e4\n exponent_mask = 0b01111111 ^ ((1 << in_dtype.fp_mantissa_width) - 1)\n normal = torch.logical_and((f8_tensor & exponent_mask) != 0, (f8_tensor & exponent_mask) != exponent_mask)\n ref16 = convert_float_to_float32(f8_tensor, in_dtype)\n # WARN: currently only normal float8s are handled\n assert torch.all(xf16[normal] == ref16[normal])\n\n f8_output_tensor = torch.empty_like(xf16, dtype=torch.int8)\n f8_output = triton.reinterpret(f8_output_tensor, in_dtype)\n copy_kernel[grid](xf16, f8_output, n_elements, BLOCK_SIZE=1024)\n\n assert torch.all(f8_tensor == f8_output_tensor)", "def test_quantize_conv_transpose_u8u8(self):\n\n np.random.seed(1)\n model_fp32_path = \"conv_transpose_fp32.onnx\"\n self.construct_model(model_fp32_path)\n data_reader = self.input_feeds(1, {\"input\": [1, 1, 7, 7]})\n\n self.static_quant_test_qdq(\n model_fp32_path,\n data_reader,\n activation_type=QuantType.QUInt8,\n weight_type=QuantType.QUInt8,\n )", "def get_frame(self,t):\n\n return pyfx.util.to_array(self._img_list[t],dtype=np.uint8,\n num_channels=4)", "def _read_uint12_video(data, shape):\n data = np.memmap(data, dtype=np.uint8, mode=\"r\")\n fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n snd_uint12 = ((mid_uint8 % 16) << 8) + lst_uint8\n return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), shape)", "def ReadFixed8(self):\n fval = self.ReadInt64()\n return Fixed8(fval)", "def predict(self):\n for src_p, pair in 
enumerate(self.pairs):\n dst_p = pair[1].argmax()\n dst_ind = pair[0][dst_p]\n\n self.vector_field.append(np.hstack([self.frame_0[src_p], self.frame_1[dst_ind]]))\n\n self.vector_field = np.vstack(self.vector_field)\n\n return self.vector_field", "def extract_data(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(28 * 28 * 10000 * 1)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (255 / 2.0)) / 255\n data = data.reshape(10000, 28, 28, 1)\n return data", "def __call__(self, uint8_image, n_upscales=0):\n rects = self._dlib_model(uint8_image, n_upscales)\n return [rect_to_pointgraph(r) for r in rects]", "def readFLO(file):\r\n\r\n tag_float = 202021.25\r\n with open(file) as f:\r\n nbands = 2\r\n tag = np.fromfile(f, np.float32, 1)[0]\r\n\r\n if tag != tag_float:\r\n raise ValueError('wrong tag possibly due to big-endian machine?')\r\n\r\n width = np.fromfile(f, np.int32, 1)[0]\r\n height = np.fromfile(f, np.int32, 1)[0]\r\n\r\n tmp = np.fromfile(f, np.float32)\r\n tmp = tmp.reshape(height, width * nbands)\r\n\r\n flow = np.zeros((height, width, 2))\r\n flow[:, :, 0] = tmp[:, 0::2]\r\n flow[:, :, 1] = tmp[:, 1::2]\r\n\r\n return flow", "def preprocess(x, y):\n x = tf.cast(x, tf.float32) / 255.0\n x = tf.reshape(x, [28*28])\n y = tf.cast(y, tf.int32)\n return x, y", "def preprocess(image):\n return (image / 255) * 2 - 1", "def make8UC(mat):\n mat_256 = mat[:,:]# *255\n mat_256.round()\n mat_8UC = np.uint8(mat_256)\n \n return mat_8UC", "def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n x = self.upsample(x)\n return x", "def video_to_array(video_file, n_frames=256):\n video = mpe.VideoFileClip(video_file)\n video_array = np.array([f for f in video.iter_frames()])\n video.reader.close()\n del video.reader\n del video\n\n if video_array.shape[0] > n_frames:\n return video_array[:n_frames].astype(np.float32)\n else:\n shape = video_array.shape\n pad = np.zeros([n_frames - shape[0], shape[1], shape[2], shape[3]])\n return np.concatenate([video_array, pad]).astype(np.float32)", "def img_to_vector(img_fn, label=0):\r\n img = \"\"\r\n for line in open(img_fn).readlines()[:32]:\r\n img += line[:32]\r\n\r\n # labels are always attached at the last position\r\n itera = [_ for _ in img + str(label)]\r\n return numpy.fromiter(itera, \"f4\")", "def _preprocess_input(self, x):\n\n # 'RGB'->'BGR'\n x = x[:, :, ::-1]\n # Zero-center by mean pixel\n x = x - tf.stack((tf.ones_like(x[:, :, :, 0]) * tf.constant(103.939),\n tf.ones_like(x[:, :, :, 1]) * tf.constant(116.779)\n , tf.ones_like(x[:, :, :, 2]) * tf.constant(123.68)), axis=-1)\n\n # x = 2*x/255\n return x", "def unpack_mraw_frame_12bit(file,n_pixels,start_frame=0):\n \n start_byte = start_frame*n_pixels*12/8\n file.seek(start_byte)\n image = []\n \n n_bytes = n_pixels*12/8\n \n int_array = np.fromfile(file,count=n_bytes,dtype=np.uint8)\n \n bytes_1 = int_array[::3]\n bytes_2 = int_array[1::3] \n bytes_3 = int_array[2::3]\n\n \n # Here 2 pixels from the image are shared between three bytes of data like\n #\n # | byte 1 | byte 2 | byte 3 |\n # |o o o o o o o o|o o o o | o o o o|o o o o o o o o|\n # | Pixel 1 | Pixel 2 |\n #\n # byte 2 is shared between pixel and we need only the right-most bits for pixel 2 and\n # only the left most bits for pixel 1. 
\n \n # right-most bits of byte 2 = Most significant bits of Pixel 2\n # left-most bits of byte 2 = Least significant bits of Pixel 1\n \n pix_1 = np.array(16.0*bytes_1 + np.right_shift(bytes_2,4),dtype=np.uint16)\n pix_2 = np.array(256.0*np.bitwise_and(bytes_2,0b1111) + bytes_3,dtype=np.uint16)\n \n try:\n image = (np.dstack([pix_1,pix_2])).reshape((1,n_pixels))[0]\n except:\n image = np.zeros(n_pixels)\n return image", "def __float__(self):\n return float(self.encoded) / (1 << self.frac_bits)", "def flux(self, u):\n flu = np.zeros((3,2), dtype=np.float64)\n flu[0,0] = u[1]\n flu[1,0] = u[0] * (u[1]/u[0])**2 + 0.5 * 9.81*u[0]**2\n flu[2,0] = u[1] * u[2]/u[0] #FIXME attenzione che c'è il punto controllare se sono scalari o vettori'\n flu[0,1] = u[2]\n flu[1,1] = u[2] * u[1]/u[0]\n flu[2,1] = u[0] * (u[2]/u[0])**2 + 0.5 * 9.81*u[0]**2\n return flu", "def translate_frames(model, frames):\n frames_v = autograd.Variable(torch.FloatTensor(frames).cuda())\n out_frames = model(frames_v)\n return out_frames.cpu().numpy()", "def processFrame(us_spacing, frame_np, frame_mat, clip_info):\n # print('us_spacing {}'.format(us_spacing))\n # print('frame_np {}'.format(frame_np))\n # print('frame_mat {}'.format(frame_mat))\n # print('clip_info {}'.format(clip_info))\n # sys.exit()\n clip_x, clip_y, clip_h, clip_w = clip_info\n\n fixed_np = frame_np[clip_x:clip_x+clip_h, clip_y:clip_y+clip_w]\n mat_scales = computeScale(input_mat=frame_mat)\n # print('matscales {}'.format(mat_scales))\n spacing = np.mean(mat_scales[:2]) / us_spacing[0]\n frame_w = int(spacing * fixed_np.shape[1])\n frame_h = int(spacing * fixed_np.shape[0])\n fixed_np = cv2.resize(fixed_np, (frame_w, frame_h))\n fixed_np = fixed_np.astype(np.float64)\n return fixed_np", "def preprocess_image(image):\n\n image = tf.to_float(image)\n image = tf.subtract(image, 128.0)\n image = tf.div(image, 128.0)\n return image", "def to_uint16(f):\n from numpy import array, clip\n\n img = array(clip(f,0,65535)).astype('H')\n return img", "def preprocess_frame(frame, v_crop=(0, 0), h_crop=(0, 0)):\n\n # heigth, width, _ = frame.shape\n # frame = np.mean(frame, axis=2) / 255.0\n # frame = frame[v_crop[0]:heigth - v_crop[1], h_crop[0]:width - h_crop[1]]\n # frame = imresize(frame, size=(80, 80), interp='nearest')\n\n frame = frame[::2, ::2]\n frame = np.mean(frame, axis = 2).astype(np.uint8)\n return frame", "def frame_pre_process(self, frame):\n assert len(frame.shape) == 3, \\\n \"Expected input frame in (H, W, C) format proposed\"\n assert frame.shape[2] in [3, 4], \\\n \"Expected BGR or BGRA input process\"\n # setup the frame in the original format\n \n #orig_image = frame.copy()\n original_image = frame.copy()\n \n # creating the frame transpose conversion\n frame = frame.transpose((2, 0, 1)) # Converting from HWC to CHW\n \n # creating the frame dimensions\n frame = np.expand_dims(frame, axis=0)\n \n # return the frames outcome\n return (frame)", "def r8_f2(t, y):\n yp = np.zeros(np.size(y))\n yp[0] = y[1]\n yp[1] = -y[0]\n return(yp)", "def to_input_image(camera_frame):\n\tcv2_img = IMAGES_BRIDGE.imgmsg_to_cv2(camera_frame, \"bgr8\")\n\tcv2_img = cv2.resize(cv2_img, PROCESSED_IMG_SIZE)\n\timg = cv2_img[...,::-1].astype(np.float32) # Converts from GBR to RGB\n\n\ttensor = tf.convert_to_tensor(img_to_array(img))\n\ttensor = tensor / 255\n\n\treturn tensor", "def GetFloat(start, numBytes, ens):\n try:\n return struct.unpack(\"f\", ens[start:start + numBytes])[0]\n except Exception as e:\n logging.debug(\"Error creating a float from bytes. 
\" + str(e))\n return 0.0", "def data_to_frame(self, data):\n frame = numpy.fromstring(data, dtype=numpy.uint8)\n frame = numpy.reshape(frame, (self.height, self.width, 3))\n return frame", "def read(self):\r\n\t\t# get data from camera\r\n\t\tarray = self.ueye.get_data(self.pcImageMemory, self.width, self.height, self.nBitsPerPixel, self.pitch, copy=False)\r\n\t\t# get frame as numpy array\r\n\t\tframe = np.reshape(array,(self.height.value, self.width.value, self.bytes_per_pixel))\r\n\t\t\r\n\t\t\"\"\"\r\n\t\tcamera_matrix = np.array([\r\n\t\t\t[4.5330796457901283e+02, 0., 6.1902229288626302e+02],\r\n\t\t\t[0., 4.5369175559310276e+02, 5.1298362120979994e+02],\r\n\t\t\t[0., 0., 1.]])\r\n\t\t\r\n\t\tdist_coeffs = np.array([\r\n\t\t\t-3.1812973406286371e-01, 9.6396352148682182e-02,\r\n\t\t\t2.9601124432187590e-03, 9.7700591472463412e-04,\r\n\t\t\t-1.1929681608809075e-02\r\n\t\t])\r\n\r\n\t\tframe = cv2.undistort(frame, camera_matrix, dist_coeffs, camera_matrix)\r\n\t\t\"\"\"\r\n\r\n\t\treturn frame", "def preprocess_frame(self, frame):\n # Greyscale frame\n img = np.mean(frame,-1)\n\n # Remove black bar at the bottom\n cropped_img = img[:-12, :]\n\n # Normalize Pixel Values\n normalized_frame = cropped_img/255.0\n\n return normalized_frame", "def get_initial_frame(self, memory):\n B = memory.size(0)\n decoder_input = Variable(memory.data.new(\n B, self.n_mel_channels).zero_())\n # print(decoder_input.size())\n return decoder_input", "def _eight_byte_real(value):\n if value == 0:\n return b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n if value < 0:\n byte1 = 0x80\n value = -value\n else:\n byte1 = 0x00\n fexp = numpy.log2(value) / 4\n exponent = int(numpy.ceil(fexp))\n if fexp == exponent:\n exponent += 1\n mantissa = int(value * 16.0 ** (14 - exponent))\n byte1 += exponent + 64\n byte2 = mantissa // 281474976710656\n short3 = (mantissa % 281474976710656) // 4294967296\n long4 = mantissa % 4294967296\n return struct.pack(\">HHL\", byte1 * 256 + byte2, short3, long4)", "def read_uint8(self):\n bytes = self.data[:1]\n value = struct.unpack('!B',bytes)[0]\n self.data = self.data[1:]\n return value", "def _eight_byte_real(value):\n if value == 0:\n return b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n if value < 0:\n byte1 = 0x80\n value = -value\n else:\n byte1 = 0x00\n fexp = numpy.log2(value) / 4\n exponent = int(numpy.ceil(fexp))\n if fexp == exponent:\n exponent += 1\n mantissa = int(value * 16.**(14 - exponent))\n byte1 += exponent + 64\n byte2 = (mantissa // 281474976710656)\n short3 = (mantissa % 281474976710656) // 4294967296\n long4 = mantissa % 4294967296\n return struct.pack(\">HHL\", byte1 * 256 + byte2, short3, long4)", "def float32_to_float8e4m3( # pylint: disable=too-many-statements\n fval: float,\n scale: float = 1.0,\n fn: bool = True,\n uz: bool = False,\n saturate: bool = True,\n) -> int:\n if not fn:\n raise NotImplementedError(\n \"float32_to_float8e4m3 not implemented with fn=False.\"\n )\n x = fval / scale\n b = int.from_bytes(struct.pack(\"<f\", np.float32(x)), \"little\")\n ret = (b & 0x80000000) >> 24 # sign\n if uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x80\n if np.isinf(x):\n if saturate:\n return ret | 127\n return 0x80\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 116:\n pass\n elif e < 120:\n # denormalized number\n ex = e - 119\n if ex >= -2:\n ret |= 1 << (2 + ex)\n ret |= m >> (21 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (20 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n 
ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 135:\n # normalized number\n ex = e - 119 # 127 - 8\n if ex == 0:\n ret |= 0x4\n ret |= m >> 21\n else:\n ret |= ex << 3\n ret |= m >> 20\n if m & 0x80000 and ((m & 0x100000) or (m & 0x7FFFF)):\n if (ret & 0x7F) < 0x7F:\n # rounding\n ret += 1\n elif not saturate:\n return 0x80\n elif saturate:\n ret |= 0x7F # 01111110\n else:\n ret = 0x80\n elif m == 0:\n # -0\n ret = 0\n return int(ret)\n else:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x7F | ret\n if np.isinf(x):\n if saturate:\n return ret | 126\n return 0x7F | ret\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 117:\n pass\n elif e < 121:\n # denormalized number\n ex = e - 120\n if ex >= -2:\n ret |= 1 << (2 + ex)\n ret |= m >> (21 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (20 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 136:\n # normalized number\n ex = e - 120\n if ex == 0:\n ret |= 0x4\n ret |= m >> 21\n else:\n ret |= ex << 3\n ret |= m >> 20\n if (ret & 0x7F) == 0x7F:\n ret &= 0xFE\n if (m & 0x80000) and ((m & 0x100000) or (m & 0x7FFFF)):\n if (ret & 0x7F) < 0x7E:\n # rounding\n ret += 1\n elif not saturate:\n ret |= 0x7F\n elif saturate:\n ret |= 126 # 01111110\n else:\n ret |= 0x7F\n return int(ret)", "def process(img):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) \n x_t = cv2.resize(img, (112, 160), interpolation=cv2.INTER_AREA)\n x_t = np.nan_to_num(x_t)\n x_t = cv2.Laplacian(x_t,cv2.CV_8U)\n\n return x_t.astype(np.uint8)", "def Decode(scaleFactorFull,bitAllocFull,mantissaFull,overallScaleFactorFull,codingParams):\n\n if(codingParams.blocksize == 3):\n #print \"MDCTLines: \", codingParams.nMDCTLines\n a = codingParams.longBlockSize/2\n b = codingParams.shortBlockSize/2\n elif (codingParams.blocksize == 2):\n a = codingParams.shortBlockSize/2\n b = a\n elif (codingParams.blocksize == 1):\n b = codingParams.longBlockSize/2\n a = codingParams.shortBlockSize/2\n else:\n a = codingParams.longBlockSize/2\n b = a\n N = a+b\n halfN = N/2\n\n #halfN = codingParams.nMDCTLines\n #N = 2*halfN\n # vectorizing the Dequantize function call\n# vDequantize = np.vectorize(Dequantize)\n data = []\n mdctLines = []\n for iCh in range(codingParams.nChannels):\n\n scaleFactor = scaleFactorFull[iCh]\n bitAlloc = bitAllocFull[iCh]\n mantissa = mantissaFull[iCh]\n overallScaleFactor = overallScaleFactorFull[iCh]\n rescaleLevel = 1.*(1<<overallScaleFactorFull[iCh])\n # reconstitute the first halfN MDCT lines of this channel from the stored data\n mdctLine = np.zeros(halfN,dtype=np.float64)\n iMant = 0\n for iBand in range(codingParams.sfBands.nBands):\n nLines =codingParams.sfBands.nLines[iBand]\n if bitAlloc[iBand]:\n mdctLine[iMant:(iMant+nLines)]=vDequantize(scaleFactor[iBand], mantissa[iMant:(iMant+nLines)],codingParams.nScaleBits, bitAlloc[iBand])\n iMant += nLines\n mdctLine /= rescaleLevel # put overall gain back to original level\n mdctLines.append(mdctLine)\n\n #print codingParams.couplingParams\n if codingParams.doCoupling == True and len(mdctLines[0]) > 128:\n #print len(mdctLines[0])\n mdctLines = np.array(mdctLines)\n # better to just pass codingParams to channelDecoupling?\n mdctLines = 
ChannelDecoupling(mdctLines,codingParams.coupledChannel,codingParams.couplingParams,codingParams.sampleRate,codingParams.nCouplingStart)\n\n mdctLines = np.array(mdctLines)\n for iCh in range(codingParams.nChannels):\n data.append(np.array([],dtype=np.float64)) # add location for this channel's data\n mdctLine = mdctLines[iCh]\n if codingParams.doSBR == True:\n ### SBR Decoder Module 1 - High Frequency Reconstruction ###\n mdctLine = HiFreqRec(mdctLine,codingParams.sampleRate,codingParams.sbrCutoff)\n ### SBR Decoder Module 2 - Additional High Frequency Components ###\n mdctLine = AddHiFreqs(mdctLine,codingParams.sampleRate,codingParams.sbrCutoff)\n ### SBR Decoder Module 3 - Envelope Adjustment ###\n mdctLine = EnvAdjust(mdctLine,codingParams.sampleRate,codingParams.sbrCutoff,codingParams.specEnv[iCh])\n # print codingParams.specEnv # Print envelope for debugging purposes\n\n # IMDCT and window the data for this channel\n # data = SineWindow( IMDCT(mdctLine, halfN, halfN) ) # takes in halfN MDCT coeffs\n imdct = IMDCT(mdctLine, a, b) # takes in halfN MDCT coeffs\n data[iCh] = np.append(SineWindow(np.append(imdct[:a],np.zeros(a)))[:a],SineWindow(np.append(np.zeros(b),imdct[a:]))[b:])\n #print data.size\n # end loop over channels, return reconstituted time samples (pre-overlap-and-add)\n\n return data", "def convert_image_np(inp):\n inp = inp.numpy().transpose((1, 2, 0))\n inp = (inp*255).astype(np.uint8)\n return inp", "def proj_to_velo(calib_data):\n rect = calib_data[\"R0_rect\"].reshape(3, 3)\n #to transform a point from Lidar framce to camera frame\n #reshape the flat line with 12 elements to 3X4 matrix\n velo_to_cam = calib_data[\"Tr_velo_to_cam\"].reshape(3, 4)\n#print('velo2cam', velo_to_cam)\n inv_rect = np.linalg.inv(rect)\n #select all rows and only first three columns\n#print('velo_to_cam[:, :3]', velo_to_cam[:, :3])\n #select all rows and only first three columns\n inv_velo_to_cam = np.linalg.pinv(velo_to_cam[:, :3])\n return np.dot(inv_velo_to_cam, inv_rect)", "def denormalize(float32_frame):\n if (not isinstance(float32_frame, tf.Tensor) or\n float32_frame.dtype != tf.float32):\n raise ValueError(f\"Invalid input: {float32_frame}\")\n return tf.image.convert_image_dtype(float32_frame, tf.uint8, saturate=True)", "def preprocess_sample(file, params):\n\n videoFile = file + \".mp4\"\n audioFile = file + \".wav\"\n roiFile = file + \".png\"\n visualFeaturesFile = file + \".npy\"\n\n roiSize = params[\"roiSize\"]\n normMean = params[\"normMean\"]\n normStd = params[\"normStd\"]\n vf = params[\"vf\"]\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n #Extract the audio from the video file using the FFmpeg utility and save it to a wav file.\n v2aCommand = \"ffmpeg -y -v quiet -i \" + videoFile + \" -ac 1 -ar 16000 -vn \" + audioFile\n os.system(v2aCommand)\n\n\n #for each frame, resize to 224x224 and crop the central 112x112 region\n captureObj = cv.VideoCapture(videoFile)\n roiSequence = list()\n while (captureObj.isOpened()):\n ret, frame = captureObj.read()\n if ret == True:\n grayed = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n grayed = grayed/255\n grayed = cv.resize(grayed, (224,224))\n roi = grayed[int(112-(roiSize/2)):int(112+(roiSize/2)), int(112-(roiSize/2)):int(112+(roiSize/2))]\n roiSequence.append(roi)\n else:\n break\n captureObj.release()\n cv.imwrite(roiFile, np.floor(255*np.concatenate(roiSequence, axis=1)).astype(np.int))\n\n\n #normalise the frames and extract features for each frame using the visual frontend\n #save the visual features 
to a .npy file\n inp = np.stack(roiSequence, axis=0)\n inp = np.expand_dims(inp, axis=[1,2])\n inp = (inp - normMean)/normStd\n inputBatch = torch.from_numpy(inp)\n inputBatch = (inputBatch.float()).to(device)\n vf.eval()\n with torch.no_grad():\n outputBatch = vf(inputBatch)\n out = torch.squeeze(outputBatch, dim=1)\n out = out.cpu().numpy()\n np.save(visualFeaturesFile, out)\n return", "def preprocess_input(img):\n img /= 255.\n img -= 0.5\n img *= 2.\n return img", "def _decode_float(fp):\n return struct.unpack('>f', fp.read(4))[0]", "def prepro(I):\n# I = env.reset() # Use this to verify, whats happening\n# plt.imshow(I)\n I = I[35:195] # crop and keep only the play area\n I = I[::2,::2,0] # downsample by factor of 2, take every second row and column, and take only \"R\" component out of RGB image\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (but paddles, ball) just set to 1\n return I.astype(np.float).ravel() # convert to 1D array and return" ]
[ "0.6424506", "0.6271997", "0.6116359", "0.6106592", "0.6086693", "0.6025813", "0.6025813", "0.59201217", "0.586013", "0.5815289", "0.5804463", "0.564739", "0.5637826", "0.56362927", "0.56270623", "0.5616849", "0.5541839", "0.5488203", "0.54803914", "0.54658365", "0.54447323", "0.5426965", "0.5415887", "0.5402738", "0.5395356", "0.5375945", "0.5370641", "0.53697014", "0.53526384", "0.53460836", "0.53396344", "0.5320679", "0.5319991", "0.5313632", "0.53018755", "0.52966624", "0.52899414", "0.52898747", "0.52764666", "0.5275146", "0.5266711", "0.5266711", "0.526034", "0.5248756", "0.52468926", "0.5245522", "0.5231327", "0.5228845", "0.5221602", "0.5212421", "0.5207554", "0.5194684", "0.51800454", "0.51777285", "0.516616", "0.516331", "0.51564413", "0.51526314", "0.5147488", "0.51308954", "0.5126438", "0.51249313", "0.5123749", "0.51209885", "0.51185775", "0.5108277", "0.51077497", "0.5106042", "0.5096754", "0.50942665", "0.50935787", "0.50921494", "0.50910866", "0.50889426", "0.5088735", "0.5087581", "0.5084052", "0.5082787", "0.508199", "0.5080756", "0.5066103", "0.5058284", "0.5050451", "0.504622", "0.50461847", "0.5044763", "0.5043462", "0.50393987", "0.5035152", "0.5026479", "0.5020831", "0.501481", "0.5012715", "0.50107867", "0.5006344", "0.50004447", "0.4999283", "0.49976856", "0.49917975", "0.49723935" ]
0.6721163
0
take 1D float array of rewards and compute discounted reward
def discount_rewards(self, r, gamma): discounted_r = np.zeros_like(r) running_add = 0 for t in reversed(range(0, r.size)): if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!) running_add = running_add * gamma + r[t] discounted_r[t] = running_add return np.array(discounted_r, dtype=np.float64)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def discount_rewards(rewards):\r\n discounted_r = np.zeros_like(rewards)\r\n running_add = 0\r\n for t in reversed(range(0, len(rewards))): \r\n running_add = running_add * reward_discount + rewards[t]\r\n discounted_r[t] = running_add\r\n return discounted_r", "def discount_rewards(rewards, gamma):\n reward_shape = rewards.shape\n if len(reward_shape) == 1:\n discounted_r = np.zeros(shape=(*reward_shape, 1), dtype=np.float)\n else:\n discounted_r = np.zeros(shape=reward_shape, dtype=np.float)\n running_add = 0\n\n for t in reversed(range(0, rewards.size)):\n running_add = running_add * gamma + rewards[t]\n discounted_r[t] = running_add\n\n return discounted_r", "def discount_rewards(r):\n\tdiscounted_r = np.zeros_like(r)\n\trunning_add = 0\n\tfor t in reversed(range(0, r.size)):\n\t\trunning_add = running_add * gamma + r[t]\n\t\tdiscounted_r[t] = running_add\n\treturn np.array(discounted_r)", "def helper_discount_rewards(rewards, discount_rate):\n discounted_rewards = np.zeros(len(rewards))\n cumulative_rewards = 0\n for step in reversed(range(len(rewards))):\n cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate\n discounted_rewards[step] = cumulative_rewards\n return discounted_rewards", "def discount_rewards(r):\n\tdiscounted_r = np.zeros_like(r)\n\trunning_add = 0\n\tfor t in reversed(xrange(0, r.size)):\n\t\trunning_add = running_add * gamma + r[t]\n\t\tdiscounted_r[t] = running_add\n\treturn discounted_r", "def compute_discounted_rewards(self, rewards):\n discounted_rewards = []\n for t in range(len(rewards)):\n Gt = 0\n pw = 0\n for r in rewards[t:]:\n Gt = Gt + self.gamma ** pw * r\n pw = pw + 1\n discounted_rewards.append(Gt)\n\n # Normalization of the discounted rewards\n discounted_rewards = np.array(discounted_rewards)\n discounted_rewards = (discounted_rewards - discounted_rewards.mean()) / (discounted_rewards.std() + 1e-10)\n\n return discounted_rewards", "def discount_rewards(r):\r\n discounted_r = np.zeros_like(r)\r\n running_add = 0\r\n for t in reversed(xrange(0, r.size)):\r\n running_add = running_add * .99 + r[t]\r\n discounted_r[t] = running_add \r\n return discounted_r", "def computed_discounted_rewards(rewards: list[float], gamma: float = 0.95) -> float:\n discounted_reward = 0\n for i, r in enumerate(rewards):\n discounted_reward += gamma**i * r\n return discounted_reward", "def discount(rewards):\n sum_so_far = 0.0\n rewards_so_far = []\n for r in rewards[::-1]:\n sum_so_far = sum_so_far * gamma_discount + r\n rewards_so_far.append(sum_so_far)\n return rewards_so_far[::-1]", "def discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(xrange(0, r.size)):\n running_add = running_add * gamma_099 + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(xrange(0, r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def discount_rewards(r):\r\n discounted_r = np.zeros_like(r)\r\n running_add = 0\r\n for t in reversed(range(0, 
r.size)):\r\n running_add = running_add * gamma + r[t]\r\n discounted_r[t] = running_add\r\n return discounted_r", "def discount_rewards(self, r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n if r[t] != 0:\n running_add = 0 # Pong-specific\n running_add = running_add * self.gamma + r[t]\n discounted_r[t] = running_add\n \n #print(\"Mean reward before normalized: {}\".format(np.mean(discounted_r)))\n mu = np.mean(discounted_r)\n var = np.var(discounted_r)\n discounted_r -= mu \n discounted_r /= np.sqrt(var+1e-6)\n return discounted_r", "def discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def discount_rewards(r):\r\n discounted_r = np.zeros_like(r)\r\n running_add = 0\r\n for t in reversed(xrange(0, r.size)):\r\n running_add = running_add * gamma + r[t]\r\n discounted_r[t] = running_add\r\n return discounted_r", "def discount_rewards_and_normalize(self, rewards):\n discounted_rewards = np.empty(len(rewards))\n cumulative_rewards = 0\n\n for step in reversed(range(len(rewards))):\n cumulative_rewards = rewards[step] + cumulative_rewards * self.gamma\n discounted_rewards[step] = cumulative_rewards\n\n reward_mean = discounted_rewards.mean()\n reward_std = discounted_rewards.std()\n\n return [(reward - reward_mean) / reward_std\n for reward in discounted_rewards]", "def discount_and_normalize_rewards(episode_rewards):\n # Get empty array with the same size as the rewards array\n discounted_episode_rewards = np.zeros_like(episode_rewards)\n\n # Variable that stores value of the discounted reward being calculated by the loop\n current_reward = 0.0\n # Loop that does the magic\n for i in reversed(range(len(episode_rewards))):\n # Calculate the discounted reward\n current_reward = current_reward * gamma + episode_rewards[i]\n # Store it in the array\n discounted_episode_rewards[i] = current_reward\n\n # Normalize.\n mean = np.mean(discounted_episode_rewards)\n std = np.std(discounted_episode_rewards)\n discounted_episode_rewards = (discounted_episode_rewards - mean) / (std)\n\n return discounted_episode_rewards", "def loss_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n #running_add = running_add * gamma + r[t]\n #discounted_r[t] = running_add\n discounted_r[t] = 0\n return discounted_r", "def discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(xrange(0, r.size)):\n if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific)\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def discounted_reward(self, discount):\n\n tl = len(self)\n return (1 - discount) * np.sum(discount ** np.arange(tl) * self.rewards)", "def discount(rewards, discount_factor=.99):\n # Compute discounted rewards (trust me this works and hopefully it's super fast)\n timesteps = len(rewards) # make into matrix\n rewards = tf.convert_to_tensor([rewards],dtype=tf.float32)\n # create lower triangular matrix of discount_factor weights\n T = tf.convert_to_tensor([[max(1+i-j,0) for j in range(timesteps)] for i in range(timesteps)],dtype=tf.float32)\n T = tf.math.pow(discount_factor, T)\n T = tf.linalg.band_part(T, -1, 0)\n # apply discount factor\n return 
tf.matmul(rewards, T)", "def discount_reward(reward, gamma):\n discount_r = np.zeros_like(reward)\n r_total = 0\n for _ in reversed(range(0, reward.size)):\n if reward[_] != 0:\n r_total = 0\n r_total = r_total * gamma + reward[_]\n discount_r[_] = r_total\n return discount_r", "def discount_and_normalize_rewards(self, episode_rewards):\n # Get empty array with the same size as the rewards array\n discounted_episode_rewards = np.zeros_like(episode_rewards)\n\n # Variable that stores value of the discounted reward being calculated by the loop\n current_reward = 0.0\n # Loop that does the magic\n for i in reversed(range(len(episode_rewards))):\n # Calculate the discounted reward\n current_reward = current_reward * gamma + episode_rewards[i]\n # Store it in the array\n discounted_episode_rewards[i] = current_reward\n\n # Normalize.\n mean = np.mean(discounted_episode_rewards)\n std = np.std(discounted_episode_rewards)\n discounted_episode_rewards = (discounted_episode_rewards - mean) / (std)\n\n return discounted_episode_rewards", "def discount_rewards(r, gamma):\n #print(\"r\", r)\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(xrange(0, r.size)):\n #print(\"t\", t)\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def discount(self, rewards, dones, gamma):\n discounted = []\n ret = 0\n for reward, done in zip(rewards[::-1], dones[::-1]):\n ret = reward + gamma * ret * (1. - done)\n discounted.append(ret)\n return discounted[::-1]", "def discounted_rewards(r, gamma):\n discounted_r = np.zeros_like(r)\n running_sum = 1\n for i in reversed(range(0,len(r))):\n discounted_r[i] = running_sum * r[i]\n running_sum = discounted_r[i]\n return list(discounted_r)", "def discount_and_normalize_rewards(all_rewards, discount_rate):\n all_discounted_rewards = []\n for rewards in all_rewards:\n all_discounted_rewards.append(helper_discount_rewards(rewards, discount_rate))\n\n flat_rewards = np.concatenate(all_discounted_rewards)\n reward_mean = flat_rewards.mean()\n reward_std = flat_rewards.std()\n return [(discounted_rewards - reward_mean) / reward_std for discounted_rewards in all_discounted_rewards]", "def discount_rewards(r, gamma):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n running_add = running_add * gamma + r [t]\n discounted_r [t] = running_add\n return discounted_r", "def _discount_rewards(self, non_discounted_rewards):\n discounted_rewards = [0.0] * len(non_discounted_rewards)\n total_rewards = 0\n for t in reversed(range(len(non_discounted_rewards))):\n total_rewards = total_rewards * self.discount_factor + non_discounted_rewards[t]\n discounted_rewards[t] = total_rewards\n return discounted_rewards", "def get_cumulative_rewards(rewards, # rewards at each step\n gamma=0.99 # discount for reward\n ):\n\n cumulative_rewards = np.empty_like(rewards)\n cumulative_rewards = cumulative_rewards.astype(float)\n cumulative_rewards[-1] = rewards[-1]\n\n for index in range(len(rewards) - 2, -1, -1):\n discount = cumulative_rewards[index + 1] * gamma\n reward = rewards[index]\n cumulative_rewards[index] = discount + reward\n\n return cumulative_rewards # <array of cumulative rewards>", "def discount_rewards(r, gamma=0.99):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(xrange(0, r.size)):\n if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n 
return discounted_r", "def discount_rewards(rwd, gamma=0.99):\n discounted_r = np.zeros_like(rwd)\n running_add = 0\n for t in range(len(rwd)-1, -1, -1):\n running_add = running_add * gamma + rwd[t]\n discounted_r[t] = running_add\n\n return discounted_r", "def discount_rewards(self, r, epEnd):\n res = np.zeros_like(r)\n running_add = 0.0\n assert (len(r)-1) in epEnd, 'simple sanity check: the last reward must be the episode end'\n assert isinstance(r, list)\n for t in reversed(xrange(len(r))):\n if t in epEnd: running_add = 0.0 # reset \n running_add = GAMMA * running_add + r[t]\n res[t] = running_add\n return res", "def _discounted_cumsum(self, rewards, rate=None):\n # HINT1: note that each entry of the output should now be unique,\n # because the summation happens over [t, T] instead of [0, T]\n # HINT2: it is possible to write a vectorized solution, but a solution\n # using a for loop is also fine\n rate = self.gamma if rate is None else rate\n\n rewards = np.array(rewards)\n disounted_return = list(\n accumulate(rewards[::-1], lambda ret, rew: rate * ret + rew))\n disounted_return = np.array(disounted_return)[::-1]\n return disounted_return", "def discount_with_dones(rewards, dones, gamma):\n discounted = []\n r = 0\n\n # discounted rewards are calculated on the reversed reward list.\n # that returns are calculated in descending order is easy to\n # overlook in the original pseudocode.\n # when writing down an example of the pseudocode, it is clear, that\n # r_t + gamma * V(s_tp1) is calculated for each list element and\n # this is also what is done here.\n for reward, done in zip(rewards[::-1], dones[::-1]):\n r = reward + gamma*r*(1.-done)\n discounted.append(r)\n return discounted[::-1]", "def reward(input):\n state = np.array([input[0], input[1]])\n action = input[2]\n action = np.clip(action, -2.0, 2.0)\n costs = angle_normalize(state[0])**2 + .1 * state[1]**2 + .001 * (action**2)\n\n return - costs", "def reward_calc(self, reward_traj,V,V_end):\n r_all = np.concatenate((reward_traj,[V_end]),-1)\n V_all = V #np.concatenate((V,[V_end]),-1)\n delta = r_all[:-1] + self.gamma * V_all[1:] - V_all[:-1]\n \n adv = Generalized_Adv_Estimator.discounted_sum(delta,self.gamma*self.lam)\n rtg = adv + V_all[:-1]\n\n adv = adv.astype('float32')\n rtg = rtg.astype('float32')\n\n return adv, rtg", "def reward_amt(value, reward_vec, adj, softmax_inv_temp, discount, start_prob=None):\n n = len(reward_vec)\n softmax_value = np.exp(softmax_inv_temp * value) / np.sum(np.exp(softmax_inv_temp*value))\n policy = adj * softmax_value.reshape(1, -1)\n policy = util.l1_normalize_rows(policy)\n sr = np.linalg.pinv(np.eye(n) - discount * policy)\n value = np.dot(sr, reward_vec.reshape(-1, 1)).reshape(-1)\n if start_prob is None:\n start_prob = np.ones(n)*1. 
/ n\n else:\n start_prob = start_prob.reshape(n)\n return np.sum(value * start_prob)", "def discount_rewards(r, gamma=0.99, value_next=0.0):\n discounted_r = np.zeros_like(r)\n running_add = value_next\n for t in reversed(range(0, r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def discount_rewards(r, gamma=0.99, value_next=0.0):\n discounted_r = np.zeros_like(r)\n running_add = value_next\n for t in reversed(range(0, r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def _compute_reward(self):\n reward = 0.0\n return reward", "def average_reward(self):\n T = len(self)\n return np.sum(self.rewards / T)", "def compute_reward(self, done: np.ndarray) -> np.ndarray:\n rewards = [0.0 for i in range(self.agents)]\n rewarded_fields = defaultdict(float)\n if done[0][0]:\n # Get rewards for all rew fields with agents on\n for agent in range(self.agents):\n pos = self.game.get_agent_pos(agent)\n if pos in self.payoff_fields:\n rewarded_fields[pos] += 1.0\n\n for agent in range(self.agents):\n pos = self.game.get_agent_pos(agent)\n rewards[agent] += rewarded_fields.get(pos, 0.0)\n if agent < self.num_informed and pos in self.special_payoff_fields:\n rewards[agent] *= 2\n\n rewards = np.array(rewards).reshape(1, -1)\n return rewards", "def _dense_reward(self) -> float:\n y = 1\n target_goal_dists = []\n for target_shape in self.__debris_shapes:\n target_pos = target_shape.shape_body.position\n goal_pos = (target_pos[0], y) # Top of screen.\n dist = np.linalg.norm(target_pos - goal_pos)\n if target_pos[1] > 0.88:\n dist = 0\n target_goal_dists.append(dist)\n target_goal_dists = np.mean(target_goal_dists)\n return -1.0 * target_goal_dists", "def _compute_reward_(self):\n if self._target_type == \"position\":\n dist = np.linalg.norm(self._target_diff_, ord=2)\n if self._reward_type == \"linear\":\n reward_dist = -dist\n elif self._reward_type == \"precision\":\n reward_dist = -dist +\\\n np.exp( -dist**2 / 0.01)\n elif self._reward_type == \"sparse\":\n if dist < 0.05:\n reward_dist = 0\n else:\n reward_dist = -0.1\n\n elif self._target_type == \"angle\":\n dist = np.linalg.norm(self._target_diff_, ord=1)\n if self._reward_type == \"linear\":\n reward_dist = -dist\n elif self._reward_type == \"precision\":\n reward_dist = -dist +\\\n np.exp(-dist ** 2 / 0.01)\n elif self._reward_type == \"sparse\":\n raise NotImplementedError\n\n # TODO: doublecheck whether '0' or '-1' should be used as the index\n reward_vel = -self._vel_penalty * np.square(self._qd_[-1, self._joint_indices]).sum()\n\n #self.info['reward_dist'] = reward_dist\n #self.info['reward_vel'] = reward_vel\n\n return (reward_dist + reward_vel) * self._dt / 0.008", "def loss(self, states, actions, next_states, rewards, discount_rate=.99):\n # TODO: implement this\n actions = tf.cast(actions, tf.int64)\n a = tf.stack([tf.range(states.shape[0],dtype=tf.int64), actions], axis=1)\n qVals = tf.gather_nd(self.call(states), a) # [batch_size] q-values for each action\n nextVals = tf.reduce_max(self.call(next_states), axis=1) # max of q-values [batch_size, num_actions] across num_actions\n targetVals = rewards + (discount_rate*nextVals)\n loss = tf.reduce_sum(tf.math.square(qVals - targetVals))\n return loss", "def reward(self, reward):\n return float(np.sign(reward))", "def get_reward(self, dags, entropies,inputs,targets):\n if not isinstance(entropies, np.ndarray):\n entropies = entropies.data.cpu().numpy()\n\n 
score=self.get_score(inputs,targets,dags)\n #score=1-self.get_loss(inputs,targets,dags)\n print(score.item())\n R = utils.to_item(score.data)\n\n if self.args.entropy_mode == 'reward':\n rewards = R + self.args.entropy_coeff * entropies.mean()\n elif self.args.entropy_mode == 'regularizer':\n rewards = R * np.ones_like(entropies)\n else:\n raise NotImplementedError(f'Unkown entropy mode: {self.args.entropy_mode}')\n\n return rewards", "def reduce_rewards(self,rewards):\n reduced_rewards = np.zeros((len(rewards),1))\n gamma_multipliers = np.zeros((len(rewards),1))\n \n for i in range(len(rewards)):\n gammas = [GAMMA**t for t in range(len(rewards[i]))]\n \n reduced_rewards[i][0] = np.sum(np.multiply(gammas, rewards[i]))\n gamma_multipliers[i][0] = GAMMA**(len(rewards[i]))\n \n return reduced_rewards, gamma_multipliers", "def reward(self, reward):\r\n return np.sign(reward)", "def get_reward(self, observations, actions):\n\n #initialize and reshape as needed, for batch mode\n self.reward_dict = {}\n if(len(observations.shape)==1):\n observations = np.expand_dims(observations, axis = 0)\n actions = np.expand_dims(actions, axis = 0)\n batch_mode = False\n else:\n batch_mode = True\n\n #get vars\n xvel = observations[:, 9].copy()\n body_angle = observations[:, 2].copy()\n front_leg = observations[:, 6].copy()\n front_shin = observations[:, 7].copy()\n front_foot = observations[:, 8].copy()\n zeros = np.zeros((observations.shape[0],)).copy()\n\n # ranges\n leg_range = 0.2\n shin_range = 0\n foot_range = 0\n penalty_factor = 10\n\n #calc rew\n self.reward_dict['run'] = xvel\n\n front_leg_rew = zeros.copy()\n front_leg_rew[front_leg>leg_range] = -penalty_factor\n self.reward_dict['leg'] = front_leg_rew\n\n front_shin_rew = zeros.copy()\n front_shin_rew[front_shin>shin_range] = -penalty_factor\n self.reward_dict['shin'] = front_shin_rew\n\n front_foot_rew = zeros.copy()\n front_foot_rew[front_foot>foot_range] = -penalty_factor\n self.reward_dict['foot'] = front_foot_rew\n\n # total reward\n self.reward_dict['r_total'] = self.reward_dict['run'] + self.reward_dict['leg'] + self.reward_dict['shin'] + self.reward_dict['foot']\n\n #return\n dones = zeros.copy()\n if(not batch_mode):\n return self.reward_dict['r_total'][0], dones[0]\n return self.reward_dict['r_total'], dones", "def discount_rewards(self,x, gamma):\n result = [0 for i in range(len(x))]\n element = 0\n for i in range(len(x)-1, -1, -1): #-2\n element = x[i] + gamma * element\n result[i] = element\n\n return result", "def reward(self, reward):\n return np.sign(reward)", "def reward(self, reward):\n return np.sign(reward)", "def reward(self, reward):\n return np.sign(reward)", "def reward(self, reward):\n return np.sign(reward)", "def reward(self, reward):\n return np.sign(reward)", "def _returns_advantages(self, rewards, dones, values, next_value):\n\t\treturns = np.append(np.zeros_like(rewards), next_value, axis=-1)\n\t\t\n\t\tfor t in reversed(range(rewards.shape[0])):\n\t\t\treturns[t] = rewards[t] + self.gamma * returns[t + 1] * (1 - dones[t])\n\t\t\n\t\treturns = returns[:-1]\n\t\tadvantages = returns - values\n\n\t\treturn returns, advantages", "def _reward(self, i, rewards, reward=1):\n for j,a in enumerate(self.agents):\n if a.index==i or a.index==0:\n rewards[j]+=reward\n if self.zero_sum:\n if a.index!=i or a.index==0:\n rewards[j] -= reward", "def discount_reward(r_dic, gamma):\n r = 0\n for i in range(len(r_dic) - 1, -1, -1):\n if r_dic[i] != 0:\n r = r_dic[i]\n else:\n r = r * gamma\n r_dic[i] = r\n r_dic = (r_dic - 
r_dic.mean()) / (r_dic.std() + 1e-8)\n return r_dic", "def compute_reward(self, obs, action, state):\n pass", "def get_rewards(self):\n return np.array(self.rewards)", "def _compute_reward(self):\n last_score = self.episode_qualities[-2]\n new_score = self.episode_qualities[-1]\n reward = new_score - last_score\n return reward", "def total_reward(self):\n return np.sum(self.rewards)", "def test_calculate_returns(self):\n rewards = np.array([[0, 1, 0, 1],\n [0, 1, 0, 1],\n [0, 1, 1, 1],\n [1, 1, 0, 1]], dtype=np.float32) # pyformat:disable\n discount = 0.5\n masks = np.array([[1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 0],\n [1, 1, 0, 0]], dtype=np.bool) # pyformat: disable\n\n calculated_returns = rl_utils.compute_discounted_return(\n rewards, discount, masks)\n\n self.assertEqual(calculated_returns.shape, (4, 4))\n\n expected_returns = np.array([[0.125, 1.875, 0.25, 1.5],\n [0.25, 1.75, 0.5, 1.0],\n [0.5, 1.5, 1.0, 0.0],\n [1.0, 1.0, 0.0, 0.0]]) # pyformat: disable\n self.assertTrue(np.allclose(expected_returns, calculated_returns))", "def accumulate_rewards(rewards, gamma):\n reversed_rewards = rewards[::-1] # list reversal\n acc = list(itertools.accumulate(reversed_rewards, lambda x, y: x*gamma + y))\n return np.array(acc[::-1])", "def reward(self, observation, action, reward):\n self.counts[action] = self.counts[action] + 1\n n = self.counts[action]\n value = self.values[action]\n \n new_value = ((n - 1) / float(n)) * value + (1 / float(n)) * reward\n self.values[action] = new_value\n self.minmax = max(self.values) - min(self.values)\n \n \n pass", "def compute_returns(rewards, gamma=1.0):\n returns = []\n time_step = len(rewards)\n for i in range(time_step):\n curr_ret = 0\n for j in range(i,time_step):\n curr_ret += rewards[j] * gamma**(j-i)\n returns.append(curr_ret)\n return returns", "def cum_reward(self, reward_list):\n reward = 0.\n for rew in reward_list[::-1]:\n reward += rew * self.gamma\n return reward", "def reward_function(self):\r\n def R(state, decision, nodes):\r\n return -1.0/1000*nodes['G'].get_preds_value(state)*(decision['G:R_1']+decision['G:L'])\r\n \r\n return R", "def get_reward_function(self):\n R_fn = np.zeros(self.n_states)\n R_fn[0] = 1.0\n\n return R_fn", "def reward_to_go(rews):\n n = len(rews)\n rtgs = np.zeros_like(rews)\n for i in reversed(range(n)):\n rtgs[i] = rews[i] + (rtgs[i+1] if i+1 < n else 0)\n return rtgs", "def compute_intrinsic_reward(self, next_obs):\r\n next_obs = torch.tensor(next_obs, dtype=torch.float, device=self.device)\r\n #next_obs = torch.FloatTensor(next_obs).to(self.device)\r\n\r\n target_next_feature = self.rnd.target(next_obs)\r\n predict_next_feature = self.rnd.predictor(next_obs)\r\n intrinsic_reward = (target_next_feature - predict_next_feature).pow(2).mean(1) ### MSE --- Issues\r\n #intrinsic_reward = (target_next_feature - predict_next_feature).pow(2).sum(1) / 2\r\n\r\n return intrinsic_reward.data.cpu().numpy()", "def compute_returns(rewards, gamma=1.0):\n n = 0\n G = []\n while n < len(rewards):\n G.append(0) \n for i in range(n,len(rewards)):\n G[n] += (gamma**(i-n))*rewards[i]\n n+=1\n\n return G", "def _compute_reward(self): \n reward = -1\n return reward", "def compute_advantage(self, traj_rewards, gamma, reward_to_go):\n disc_rew = []\n for k, rew in enumerate(traj_rewards):\n disc_rew.append(gamma ** k * rew)\n\n num_steps = len(traj_rewards)\n if not reward_to_go:\n return np.repeat(np.sum(disc_rew), num_steps)\n else:\n return np.cumsum(disc_rew[::-1])[::-1]", "def _get_reward(self, normalized_state, 
normalized_unconstrained_action, normalized_constrained_action):\n denormalized_unconstrained_charge_rate_in_W = self.denormalize_network_output(normalized_unconstrained_action)\n denormalized_constrained_charge_rate_in_W = self.denormalize_network_output(normalized_constrained_action)\n denormalized_state = normalized_state * self.energy_system.stm_train_subsequent_states_stds + self.energy_system.stm_train_subsequent_states_means\n\n cost_of_net_drawn_electricity = self._get_cost_of_net_drawn_electricity_in_euros(denormalized_state, denormalized_constrained_charge_rate_in_W)\n charge_rate_punishment = self._get_punishment_for_excessive_charge_rate(denormalized_unconstrained_charge_rate_in_W)\n soc_punishment = self._get_punishment_for_impossible_resulting_soc(denormalized_state, denormalized_unconstrained_charge_rate_in_W) \n reward = - cost_of_net_drawn_electricity - charge_rate_punishment - soc_punishment\n #tf.summary.scalar('cost_of_net_drawn_electricity in euros', cost_of_net_drawn_electricity) \n #tf.summary.scalar('reward', reward)\n\n return reward, cost_of_net_drawn_electricity", "def compute_returns(rewards, next_value=0.0, gamma=0.99, dones=None):\n if dones is not None:\n masks = 1 - dones\n else:\n masks = np.ones_like(rewards)\n R = next_value\n returns = []\n for step in reversed(range(len(rewards))):\n R = rewards[step] + gamma * R * masks[step]\n returns.insert(0, R)\n\n returns = np.array(returns)\n returns -= returns.mean()\n returns /= returns.std() if np.std(returns) > 0 else 1\n return returns", "def reward(self, X, plays):\n return np.sum(X[plays]) # note that X[plays] is the bandit feedback", "def calculate_return(list_of_reward, gamma):\n G = 0\n for r in reversed(list_of_reward):\n G = gamma * G + r\n\n return G", "def expectations(rewards: np.ndarray,\n values: np.ndarray,\n next_is_terminal: np.ndarray,\n discount_factor=0.95,\n lam=0.95,\n bootstrap_value=0.0):\n\n #\n time_horizon = len(rewards)\n returns = np.zeros(time_horizon, np.float32)\n advantages = np.zeros(time_horizon, np.float32)\n delta = np.zeros(time_horizon, np.float32)\n\n if next_is_terminal[-1]:\n returns[-1] = rewards[-1]\n delta[-1] = rewards[-1] - values[-1]\n else:\n returns[-1] = rewards[-1] + discount_factor * bootstrap_value\n delta[-1] = rewards[-1] + discount_factor * bootstrap_value - values[-1]\n\n advantages[-1] = delta[-1]\n\n for t in reversed(range(time_horizon - 1)):\n if next_is_terminal[t]:\n returns[t] = rewards[t]\n delta[t] = rewards[t] - values[t]\n advantages[t] = delta[t]\n else:\n returns[t] = rewards[t] + discount_factor * returns[t + 1]\n delta[t] = rewards[t] + discount_factor * values[t + 1] - values[t]\n advantages[t] = delta[t] + discount_factor * lam * advantages[t + 1]\n\n return returns, advantages", "def get_cumulative_rewards(rewards, # rewards at each step\r\n gamma=0.99 # discount for reward\r\n ):\r\n cumulative_rewards = []\r\n prev = 0\r\n\r\n for r in reversed(rewards):\r\n prev = r + gamma * prev\r\n cumulative_rewards.append(prev)\r\n cumulative_rewards.reverse()\r\n return cumulative_rewards", "def _reward(self, a):\r\n\r\n xrel = self._body_coord()[0] - self.goal\r\n dist = np.sum(xrel ** 2)\r\n return (\r\n - self.cx * dist / (np.sqrt(dist) + 1) - self.cu * np.sum(a ** 2)\r\n )", "def reward(self,\n state: float) -> float:\n raise NotImplementedError", "def _compute_reward(self, observations, done):\n raise NotImplementedError()", "def _compute_reward(self, observations, done):\n raise NotImplementedError()", "def _compute_reward(self, 
observations, done):\n raise NotImplementedError()", "def _compute_reward(self, observations, done):\n raise NotImplementedError()", "def _compute_reward(self, observations, done):\n raise NotImplementedError()", "def _compute_reward(self, observations, done):\n raise NotImplementedError()", "def _build_discounted_n_step_rewards(self, gamma):\n discounts = [gamma**i for i in range(self.update_horizon)]\n discount_tensor = tf.constant(discounts)\n return tf.reduce_sum(self._replay.rewards * discount_tensor, axis=1)", "def _get_reward(self):\n\n # compute reward depending on the radius and distance to target\n radius_reward = -(self.radius_sheep*0.9)/self.init_sheep_root\n target_reward = -(self.target_distance*0.9)/self.init_sheep_root\n\n reward = target_reward + radius_reward\n\n # ensure it is always an array\n if not type(reward) is np.ndarray:\n reward = np.array([reward])\n return reward[0]", "def calculate_reward(self):\n if AT.REWARD not in self.attributes:\n return (0, 1)\n return self.attributes[AT.REWARD].calculate(self)", "def _reward(self, action: Action) -> float:\n raise NotImplementedError", "def _calculate_dense_rewards(self, desired_goal, achieved_goal):\n return np.array([]), None", "def compute_reward(self, state, rl_actions, **kwargs):\n raise NotImplementedError", "def get_reward(self, actions, next_states):\n r = []\n for state in next_states:\n ended, winner = self.judge_terminal(state)\n if ended:\n r.append(winner)\n else:\n r.append(0)\n return np.array(r)", "def update(experience_buffer, returns):\n rewards = np.array(experience_buffer[2])\n discount_rewards = rewards * (FLAGS.GAMMA ** np.arange(len(rewards)))\n current_return = discount_rewards.sum()\n returns.append(current_return)\n returns = returns[-100:] # Get recent 100 returns.\n baseline = sum(returns) / len(returns) # Baseline is the average of 100 returns.\n sess.run(train_op, {observation_: experience_buffer[0],\n action_: experience_buffer[1],\n advantage_: current_return - baseline}) \n return returns" ]
[ "0.8100923", "0.78397125", "0.7792625", "0.7742053", "0.7653779", "0.75987715", "0.7583268", "0.75820553", "0.7579539", "0.7536054", "0.7519005", "0.75047183", "0.7467692", "0.7456702", "0.7433794", "0.7426565", "0.74062765", "0.73950297", "0.7370545", "0.73564714", "0.7328519", "0.72018427", "0.7201784", "0.71919143", "0.70810694", "0.703724", "0.701953", "0.699616", "0.6977477", "0.68896544", "0.6853656", "0.6798218", "0.67952156", "0.67714196", "0.66732097", "0.6651138", "0.6589912", "0.6545616", "0.64838254", "0.6442654", "0.644177", "0.644177", "0.64416486", "0.64413583", "0.64091456", "0.6403475", "0.63985944", "0.6387154", "0.6366384", "0.6338549", "0.63202435", "0.6315961", "0.621607", "0.6207653", "0.6204095", "0.6204095", "0.6204095", "0.6204095", "0.6204095", "0.618831", "0.61564976", "0.6152739", "0.61422485", "0.6138495", "0.6127187", "0.609262", "0.6088548", "0.6085296", "0.6058265", "0.6054436", "0.60530174", "0.6043095", "0.6034321", "0.60339457", "0.6032641", "0.6029406", "0.60016", "0.60012555", "0.59916455", "0.59905344", "0.59896535", "0.5983475", "0.597715", "0.59710884", "0.59694725", "0.59581375", "0.59563076", "0.59563076", "0.59563076", "0.59563076", "0.59563076", "0.59563076", "0.59522146", "0.59508675", "0.5948057", "0.5941405", "0.5929282", "0.591477", "0.59130543", "0.5912898" ]
0.69414437
29
backward pass. (eph is array of intermediate hidden states)
def policy_backward(self, eph, epx, epdlogp, model_type): db2 = sum(epdlogp)[0] dW2 = np.dot(eph.T, epdlogp).ravel() dh = np.outer(epdlogp, self.model['W2_' + model_type]) dh[eph <= 0] = 0 # backpro prelu db1 = sum(dh) dW1 = np.dot(dh.T, epx) return {'W1_' + model_type: dW1, 'W2_' + model_type: dW2, 'b1_' + model_type: db1, 'b2_' + model_type: db2}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backward(self, dout):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)-1,-1,-1):\n act_dout = self.activations[l].backward(dout)\n dout = self.layers[l].backward(act_dout)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return", "def backward_pass(self, grad):\n pass", "def fc_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the affine backward pass. #\n ########################################################################### \n N = x.shape[0]\n x2d = x.reshape(N, -1)\n \n dx = dout.dot(w.T)\n dx = dx.reshape(x.shape)\n dw = x2d.T.dot(dout)\n db = dout.sum(axis=0) #add from top to down\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def _backward(loss):\n\n loss.backward()", "def backward_D(self):\n self.loss_D.backward()", "def fc_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the affine backward pass. #\n ###########################################################################\n dx = np.dot(dout, w.T)\n dw = np.dot(x.T, dout)\n db = np.average(dout, axis = 0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def backward(eph, epdlogp):\n dW2 = np.dot(eph.T, epdlogp).ravel() # which means\n dh = np.outer(epdlogp, model['W2'])\n dh[eph<0] = 0\n dW1 = np.dot(dh.T, epx)\n\n return {'W1':dW1, 'W2':dW2}", "def fc_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the affine backward pass. #\n ###########################################################################\n Xshape = x.shape\n x_flat = np.reshape(x,(x.shape[0],-1))\n dx = np.dot(dout,w.T)\n dw = x_flat.T.dot(dout)#np.matmul(x[...,None],dout[:,None,:]).sum(axis=0)\n db = np.sum(dout,axis=0)\n dx = np.reshape(dx,Xshape)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def backward_G(self):\n self.loss_G.backward()", "def backward(cls, grad_out, activated_out):\n raise Exception(\"Unimplemented\")", "def backward(cls, grad_out, activated_out):\n raise Exception(\"Unimplemented\")", "def fc_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the affine backward pass. 
#\n ###########################################################################\n N, d_out = dout.shape\n dx = np.matmul(dout, np.transpose(w))\n dw = np.matmul(np.transpose(x), dout)\n db = np.matmul(np.ones((1,dout.shape[0])), dout).reshape((-1,))\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def rnn_step_backward(dnext_h, cache):\n dx, dprev_h, dWx, dWh, db = None, None, None, None, None\n ##############################################################################\n # TODO: Implement the backward pass for a single step of a vanilla RNN. #\n # #\n # HINT: For the tanh function, you can compute the local derivative in terms #\n # of the output value from tanh. #\n ##############################################################################\n\n x, next_h, prev_h, Wx, Wh, b = cache\n # this is because in vanilla RNN h = tanh(z) and derivative of next_h = tanh(z) = 1-z*z;\n dz = (1-next_h*next_h)*dnext_h\n # THIS ERROR IS SPREAD AMONG THE\n # np.dot(x, Wx) + np.dot(prev_h, Wh) + b)\n dx = np.dot(dz,Wx.T)\n dprev_h = np.dot(dz,Wh.T)\n db = np.sum(dz,axis=0)\n dWx = np.dot(x.T,dz)\n dWh = np.dot(prev_h.T,dz)\n #d(tanh) = 1- tanh*tanh\n\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dx, dprev_h, dWx, dWh, db", "def backward(self, top, propagate_down, bottom):\n for ib in range(2):\n if not propagate_down[ib]:\n continue\n ndim = bottom[0].data.shape\n count = ndim[0] * ndim[2] * ndim[3]\n if not self.count:\n bottom[ib].diff[ ... ] = np.zeros_like( bottom[0].data )\n continue\n if top[0].data < 1.\n bottom[ib].diff[ ... ] = np.abs( bottom[0].data - bottom[1].data )\n bottom[ib].diff[ ... ] *= ( 1 - 1.0*self.iter/self.maxiter )\n else:\n bottom[ib].diff[ ... ] = np.ones_like( bottom[ib].data )\n inop = bottom[0].data < bottom[1].data\n bottom[ib].diff[ inop ] *= -1\n \n # ingore false label and repair\n ignore = bottom[1].data <= 0.\n count -= np.sum(ignore)\n bottom[ib].diff[ignore] = 0.\n #normlist\n bottom[ib].diff[...] 
/= count", "def one_backward_pass(t,states,init_vals,traces,delta,unit_cube,edges,P):\n \n next_vals = -1e100 * np.ones_like(init_vals)\n \n S,T = states.shape\n \n for i in range(len(states)):\n for j in range(len(unit_cube)):\n# print('State, unitcube subtracted',s,c)\n if (states[i] - unit_cube[j]).min() >= 0:\n \n state_idx = edges[i,j]\n \n tau = np.where(unit_cube[j] == 0)\n \n if len(tau[0]) == 0:\n next_vals[state_idx] = log_sum_exp([next_vals[state_idx],\\\n init_vals[i]+T*np.log(delta)])\n \n else:\n flag = 0\n temp = []\n for k in tau[0]:\n if (t-states[i,k] >=0 and t-states[i,k]< len(traces[k])):\n temp.append(traces[k][t-states[i][k]])\n else:\n flag = 1\n break\n\n if (len(set(temp)) == 1 and flag == 0):\n bit = temp[0]\n next_vals[state_idx] = log_sum_exp([next_vals[state_idx],init_vals[i]\\\n + (T-len(temp)) * np.log(delta)\\\n + np.log(1-delta) * (len(temp))\\\n + np.log(P[t,bit] + 1e-100)])\n \n return next_vals", "def L_model_backward(AL, Y, caches):\n pass", "def on_iter_backward(self, runner):\n runner.optimizer.zero_grad()\n runner.loss.backward()\n runner.optimizer.step()", "def _poputil_block_recompute_backward(op, grads):\n return grads", "def linear_backward(dZ, cache):\n pass", "def backward(self) -> np.ndarray:\n # TODO\n return None", "def _poputil_recompute_backward(op, grads):\n return grads", "def conv_backward_naive(dout, cache):\n dx, dw, db = None, None, None\n #############################################################################\n # TODO: Implement the convolutional backward pass. #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx, dw, db", "def backward(self, *output_grads):\n raise NotImplementedError", "def rnn_backward(dh, cache):\n dx, dh_prev, dWx, dWh, db = None, None, None, None, None\n ##############################################################################\n # TODO: Implement the backward pass for a vanilla RNN running an entire #\n # sequence of data. You should use the rnn_step_backward function that you #\n # defined above. 
#\n ##############################################################################\n \"\"\"\n x, next_h, prev_h, Wx, Wh, b = cache\n dz = (1-next_h*next_h)*dnext_h\n # THIS ERROR IS SPREAD AMONG THE\n # np.dot(x, Wx) + np.dot(prev_h, Wh) + b)\n dx = np.dot(dz,Wx.T)\n dprev_h = np.dot(dz,Wh.T)\n db = np.sum(dz,axis=0)\n dWx = np.dot(x.T,dz)\n dWh = np.dot(prev_h.T,dz)\n #d(tanh) = 1- tanh*tanh\n \"\"\"\n #pdb.set_trace()\n # dh is not result of forward prop\n # but\n N,T,H = dh.shape\n tmp_x, tmp_next_h, tmp_prev_h, tmp_Wx, tmp_Wh, tmp_b = cache[T-1]\n D = tmp_x.shape[1]\n\n\n dx = np.zeros((N,T,D))\n dh_prev = np.zeros((N,H))\n dWx = np.zeros((D,H))\n dWh = np.zeros((H,H))\n db = np.zeros((H))\n\n for i in reversed(list(range(0,T))):\n # current gradient at timestep is the upstream gradient (provided as input)\n # this may be coming from the Y as in the min_char_rnn.py (see line 59)\n # + downstream gradient provided by rnn_step_backward.\n dh_curr = dh[:,i,:] + dh_prev\n dx_, dh_prev, dWx_, dWh_, db_ = rnn_step_backward(dh_curr, cache[i])\n dWx += dWx_\n dWh += dWh_\n db += db_\n dx[:,i,:]=dx_\n\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dx, dh_prev, dWx, dWh, db", "def lstm_step_backward(dnext_h, dnext_c, cache):\n dx, dh, dc, dWx, dWh, db = None, None, None, None, None, None\n #############################################################################\n # TODO: Implement the backward pass for a single timestep of an LSTM. #\n # #\n # HINT: For sigmoid and tanh you can compute local derivatives in terms of #\n # the output value from the nonlinearity. #\n #############################################################################\n\n x, i, f, o, g, prev_h, next_h, prev_c, next_c, Wx, Wh, b = cache\n\n #next_h = o * tanh(next_c)\n #next_c = f *prev_c + i*g\n # derivative of next_c with respect to h\n tanh_nc = np.tanh(next_c)\n dnext_c_h = (1- tanh_nc*tanh_nc) *o*dnext_h\n dc = dnext_c_h + dnext_c\n di = g * dc\n df = prev_c * dc\n do = np.tanh(next_c) * dnext_h\n dg = i * dc\n\n # i = sigmoid(z) -> dz = (1-i)(i) di; di is the\n dz_i = (1-i)*(i) * di\n dz_f = (1-f)*(f) * df\n dz_o = (1-o)*(o) * do\n dz_g = (1-g*g) * dg\n\n dz = np.hstack((dz_i,dz_f,dz_o,dz_g)).T\n\n dx = np.dot(Wx, dz).T\n dprev_h = np.dot(Wh,dz).T\n dprev_c = f * dc\n dWx = np.dot(dz, x).T\n dWh = np.dot(dz, prev_h).T\n db = np.sum(dz, axis=1)\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dx, dprev_h, dprev_c, dWx, dWh, db", "def backward(self, grad, index):\n pass", "def affine_backward(dout, cache):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n x,w,b=cache\n \n dx=np.dot(dout,(w.transpose()))\n D=w.shape[0]\n N=x.shape[0]\n dw=np.dot(x.transpose(),dout)\n db=np.dot(dout.transpose(),np.ones(N))\n return dx,dw,db", "def backward_D(self):\n base_function._unfreeze(self.net_D)\n #print(self.input_P2.shape, self.img_gen.shape)\n self.loss_dis_img_gen = self.backward_D_basic(self.net_D, self.input_P2, self.img_gen)", "def rnn_backward(self, dh, cache):\n self.init_backprop()\n assert dh.shape[1:] == (self.sequence_length, self.hidden_size)\n 
dh = dh.transpose(1, 0, 2) # Switching to time major\n upstream_grad = np.zeros_like(dh[-1])\n for dh_item, cache_item in reversed(list(zip(dh, cache))):\n upstream_grad = self.rnn_step_backward(dh_item + upstream_grad, cache_item)\n\n return self.dU, self.dW, self.db", "def forward_backward(observations):\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE\n #\n # observations = [(4, 3), (4, 2), (3, 2), (4, 0), (2, 0), (2, 0), (3, 2), \n # (4, 2), (2, 3), (3, 5)]\n num_time_steps = len(observations)\n forward_messages = [None] * num_time_steps\n forward_messages[0] = prior_matrix\n # # # TODO: Compute the forward messages\n for i,x_i in enumerate(observations):\n if x_i:\n obs_index = obs_state_index_map[x_i]\n pi_0 = forward_messages[i]\n # print(len(B[:obs_index]))\n weights = np.multiply(pi_0, B[:,obs_index])\n # x = sum([A[j,:]* w_i.T for j,w_i in enumerate(weights)])\n else:\n weights = forward_messages[i]\n # print(weights)\n x = sum([A[j,:]* w_i.T for j,w_i in enumerate(weights)])\n if i+1 < len(forward_messages):\n forward_messages[i+1] = x#normalize(x)\n # break\n\n ## forward messages as dictionary\n # for_dict = [None]*num_time_steps\n # for j,f in enumerate(forward_messages):\n # x = Distribution()\n # for i,x_i in enumerate(f):\n # if x_i == 0:\n # continue\n # # print(i,x_i)\n # x[all_possible_hidden_states[i]] = x_i\n # for_dict[j] = x.renormalize()\n # print(for_dict[3])\n\n # print('--------------\\n-----------------\\n')\n\n\n backward_messages = [None] * num_time_steps\n # backward_messages[-1] = [1]*len(prior_matrix)\n message = np.ones(len(all_possible_hidden_states), dtype=np.float64)\n backward_messages[-1] = message/len(all_possible_hidden_states)\n \n# ****\n ## Backwards messages\n for i,x_i in enumerate(reversed(observations)):\n # print(x_i)\n if x_i:\n obs_index = obs_state_index_map[x_i]\n pi = backward_messages[-1-i]\n weights = np.multiply(pi, B[:,obs_index])\n else:\n weights = backward_messages[-1-i]\n # print(i)\n x = sum([A[:,j]*w_i for j,w_i in enumerate(weights)])\n\n if i+1 < len(backward_messages):\n backward_messages[-2-i] = x#normalize(x)\n\n ## backward messages as dictionary\n # back_dict = [None]*num_time_steps\n # for j,b in enumerate(backward_messages):\n # x = Distribution()\n # if b == None:\n # continue\n # for i,x_i in enumerate(b):\n # if x_i == 0 or x_i==None:\n # continue\n # # print(i,x_i)\n # x[all_possible_hidden_states[i]] = x_i\n # back_dict[j] = x.renormalize()\n \n # print(back_dict[0])\n # print(A[:10,:10])\n # print('\\n-----------------\\n', B[:10,:10])\n\n # print(backward_messages[2])\n # backward_messages[0] = forward_messages[0]\n # # ## marginals as matrix\n marginals = [None] * num_time_steps \n for i,x_i in enumerate(observations):\n if x_i:\n obs_index = obs_state_index_map[x_i]\n marginals[i] = np.multiply(np.multiply(backward_messages[i],\n forward_messages[i]),\n B[:,obs_index])\n else:\n marginals[i] = np.multiply(backward_messages[i],forward_messages[i])\n # if i == 0:\n # marginals[i] = np.multiply(backward_messages[i], B[:,obs_index])\n # elif i == len(observations)-1:\n # marginals[i] = np.multiply(forward_messages[i], B[:,obs_index])\n # else:\n\n ## marginals as dictionary\n marg_dict = [None]*num_time_steps\n for j,m in enumerate(marginals):\n x = Distribution()\n for i,x_i in enumerate(m):\n if x_i == 0 or x_i==None:\n continue\n x[all_possible_hidden_states[i]] = x_i\n marg_dict[j] = x.renormalize()\n # print(marginals[i])\n # print(A[:10, :10], '\\n')\n 
# print(B[:10, :10], '\\n')\n # print(marg_dict)\n return marg_dict", "def backward(self, grad_output):\n raise NotImplementedError", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def affine_backward(dout, cache):\r\n x, w, b = cache\r\n x2d = np.reshape(x, (x.shape[0], -1))\r\n\r\n # compute gradients\r\n db = np.sum(dout, axis=0)\r\n dw = np.dot(x2d.T, dout)\r\n dx = np.dot(dout, w.T)\r\n\r\n # reshape dx to match the size of x\r\n dx = dx.reshape(x.shape)\r\n \r\n return dx, dw, db", "def backward_and_step(self, loss):\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()", "def backward(self, top, propagate_down, bottom):\r\n pass", "def affine_backward(dout, cache):\n\tx, w, b = cache\n\tdx, dw, db = None, None, None\n\n\tdx = (dout@w.T).reshape(x.shape)\n\tdw = x.reshape(x.shape[0], w.shape[0]).T@dout\n\tdb = np.sum(dout, axis=0)\n\treturn dx, dw, db", "def forward(self, h_prev, x_t):\n from scipy.special import softmax\n # softmax(arr, axis=0)\n m, i = x_t.shape\n Wi = self.Wh[:i]\n Wh = self.Wh[i:]\n cat = np.concatenate((h_prev, x_t), axis=1)\n # print('meow', cat.shape)\n h_next = np.tanh(cat @ self.Wh + self.bh)\n y = self.softmax(h_next @ self.Wy + self.by)\n return h_next, y\n\n\n\n\n\n\n\n\n\n\n\n m, i = x_t.shape\n U = self.Wh[:i]\n W = self.Wh[i:]\n x = x_t\n T = len(x_t)\n # During forward propagation we save all hidden states in s because need them later.\n # We add one additional element for the initial hidden, which we set to 0\n s = np.zeros((T + 1, len(self.Wh[:self.Wh.shape[1]]) ))\n s[-1] = np.zeros(self.Wh.shape[1])\n # The outputs at each time step. Again, we save them for later.\n o = np.zeros((T, len(self.Wh[:self.Wh.shape[1]])))\n # For each time step...\n for t in np.arange(T):\n # Note that we are indxing U by x[t]. 
This is the same as multiplying U with a one-hot vector.\n #s[t] = np.tanh(U[:, x_t[]] + W.dot(s[t - 1]))\n o[t] = softmax(self.V.dot(s[t]))\n return s, o\n \n m, i = x_t.shape\n Wi = self.Wh[:i]\n Wh = self.Wh[i:]\n print(\"wi\", Wi.shape, \"wh\", Wh.shape)\n print(\"wh\", self.Wh.shape, \"wy\", self.Wy.shape)\n print(\"bh\", self.bh.shape, \"by\", self.by.shape)\n print(\"xtshape\", x_t.shape, \"hprev\", h_prev.shape)\n print(\"one\", self.Wh[:i].shape)\n one = self.Wy.dot(x_t)# np.dot(x_t, Wh) # x_t.dot(self.Wh[:i])\n two = h_prev @ Wh # h_prev.dot(self.Wh[i:])\n sum = one + two\n h_next = np.tanh(sum + self.bh)\n soft = h_next @ self.Wy\n y = self.softmax(soft) # + self.by)\n return h_next, y", "def affine_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n \n dx = dout.dot(w.T)\n dx = dx.reshape(x.shape)\n \n input_shape = x.shape\n prod = 1\n for i in range(1,len(input_shape)):\n prod *= input_shape[i]\n\n x_reshaped = x.reshape(x.shape[0], prod)\n dw = (x_reshaped.T).dot(dout)\n\n db = np.sum(dout,axis=0)\n \n return dx, dw, db", "def backward(self, top, propagate_down, bottom):\n\t\tpass", "def test_forward_backward(self):\n f = forward(self.obs, self.S, self.A, self.E)\n b = backward(self.obs, self.S, self.A, self.E)\n fp = logsumexp(f[:, -1])\n emission = precompute_emission(np.log(self.E))[tuple(self.obs[0])]\n bp = logsumexp(np.log(self.S) + emission + b[:, 0])\n assert_allclose(fp, bp)", "def backward(ctx, dy):\n y = ctx.y\n if ctx.eagerly_discard_variables:\n del ctx.y\n for i in range(len(ctx.reversible_blocks) - 1, -1, -1):\n y, dy = ctx.reversible_blocks[i].backward_pass(y, dy, not ctx.eagerly_discard_variables)\n if ctx.eagerly_discard_variables:\n del ctx.reversible_blocks\n return dy, None, None", "def affine_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the affine backward pass. #\n ###########################################################################\n dim_shape = np.prod(x[0].shape)\n N = x.shape[0]\n X = x.reshape(N, dim_shape)\n # input gradient\n dx = dout.dot(w.T)\n dx = dx.reshape(x.shape)\n # weight gradient\n dw = X.T.dot(dout)\n # bias gradient\n db = dout.sum(axis=0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def affine_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the affine backward pass. 
#\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n t = np.reshape(x,(x.shape[0],np.prod(np.shape(x)[1:])))\n dx = np.dot(dout,w.T)\n dx = np.reshape(dx,np.shape(x))\n db = np.sum(dout, axis = 0)\n dw = np.dot(t.T,dout)\n\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def backward(self):\n if self.d_out_d_in is None:\n raise Exception(\"Haven't computed the loss!\")\n return self.d_out_d_in", "def backwardpass(self, grad):\n return (self.x>0) * grad", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def _backward(self):\n if self.units[0].value > 0:\n self.units[0].gradient += 1 * self.utop.gradient\n else:\n self.units[0].gradient += 0 * self.utop.gradient", "def backppg_ce(self,x,y):\n activation = x\n activations = [x]\n zs = []\n #feed forward\n for w,b in zip(self.weights,self.biases):\n z = np.dot(w, activation)+b\n zs.append(z)\n activation = sigmod(z)\n activations.append(activation)\n #back propagation\n delta_w = [np.zeros(w.shape) for w in self.weights]\n delta_b = [np.zeros(b.shape) for b in self.biases]\n delta = activations[-1]-y\n delta_w[-1] = np.dot(delta, activations[-2].transpose())\n delta_b[-1] = delta\n for j in xrange(2, self.numlayers):\n delta = np.dot(self.weights[-j+1].transpose(), delta)*sigmod_deri(zs[-j])\n delta_b[-j] = delta\n delta_w[-j] = np.dot(delta, activations[-j-1].transpose())\n return (delta_b, delta_w)", "def affine_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n \n x_shape = x.shape\n x_mutated = x.reshape(x_shape[0], np.prod(x_shape[1:]))\n\n dx = np.dot(dout, w.T)\n dw = np.dot(x_mutated.T, dout)\n db = np.sum(dout, axis=0)\n\n dx = dx.reshape(x_shape)\n return dx, dw, db", "def backward(self, b):\n\n self.b = [b]\n\n # calculate the estimated errors on each layer ($\\delta$)\n for k,w in reversed(list(enumerate(self.weights[1:]))):\n if self.has_bias:\n delta = numpy.dot(self.b[0], w[1:].T)\n act = self.a[k+1][:,1:]\n else:\n delta = numpy.dot(self.b[0], w.T)\n act = self.a[k+1]\n self.b.insert(0, delta*self.hidden_activation.f_prime_from_f(act))\n\n self.d = []\n for a,b in zip(self.a[:-1], self.b):\n self.d.append(numpy.dot(a.T, b) / len(b))\n\n return self.d", "def backward(self):\n gradient = blah\n return gradient", "def backward(self):\n gradient = blah\n return gradient", "def backward(self, grad_output):\n grad_input = grad_output\n for module in reversed(self.modules):\n grad_input = module.backward(grad_input)\n return grad_input", "def backward(ctx, grad_output):\n inds, wgts = 
ctx.saved_tensors\n grad_inputs = trilinear_devoxelize_backward(grad_output.contiguous(),\n inds, wgts, ctx.r)\n return grad_inputs.view(grad_output.size(0), grad_output.size(1), ctx.r,\n ctx.r, ctx.r), None, None, None", "def conv_backward_naive(dout, cache):\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the convolutional backward pass. #\n ###########################################################################\n #Extract variables from cache.\n x,w,b,conv_param = cache\n stride = conv_param['stride']\n pad = conv_param['pad']\n #Extract shapes(lots of dimensions can become buggy)\n N,F,out_height,out_width = dout.shape\n #Save filter dimensions.\n HH,WW = w.shape[2],w.shape[3]\n #Start by computing gradient of the bias.(always the simplest one)\n db = np.sum(np.sum(np.sum(dout,axis = 3),axis = 2),axis = 0)\n dw = np.zeros_like(w)\n dx = np.zeros_like(x)\n #Start computing gradient of w and x.(Naive implementation)\n #Go over each filter in w.\n for i in range(F):\n #Go over each training example.\n for j in range(N):\n curr_x = x[j,:,:,:]\n #Get current gradient of activation map for j filter on i training example.\n curr_dout = dout[j,i,:,:]\n a = 0;b = 0\n #print(\"HERE\",curr_x.shape)\n #print(\"Stride:\",stride)\n for t in range(0,curr_x.shape[1] - WW + 1,stride):\n for k in range(0,curr_x.shape[2] - HH + 1,stride):\n #print(\"t: %d k: %d WW:%d HH:%d \" % (t,k,WW,HH))\n dw[i,:,:,:] += curr_dout[a,b] * curr_x[:,t:(t + WW),k:(k + HH)]\n dx[j,:,t:(t + WW),k:(k + HH)] += curr_dout[a,b] * w[i,:,:,:]\n if(b == dout.shape[3] - 1):\n a += 1\n b = 0\n else:\n b += 1\n #Remove padding.\n dx = dx[:,:,pad : (dx.shape[2] - pad),pad: (dx.shape[3] - pad)] \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def __backward(self, dA, cache, derivative_activate_fn):\n A_prev, W, b, Z, D = cache\n\n m = A_prev.shape[1]\n\n # Mask\n dA = np.multiply(dA, D) / self.keep_prob\n\n dZ = dA * derivative_activate_fn(Z)\n dW = (1.0 / m) * np.dot(dZ, A_prev.T)\n db = (1.0 / m) * np.sum(dZ, axis=1, keepdims=True)\n dA_prev = np.dot(W.T, dZ)\n\n\n assert (dW.shape == W.shape)\n assert (db.shape == b.shape)\n assert (dA_prev.shape == A_prev.shape)\n\n return dA_prev, dW, db", "def rnn_step_backward(self, grad_next, cache):\n\n th, h_prev, x = cache\n dz = grad_next * (1 - th**2)\n dh_prev = np.dot(dz, self.W.T)\n self.dW += np.dot(h_prev.T, dz) / grad_next.shape[0]\n self.dU += np.dot(x.T, dz) / grad_next.shape[0]\n self.db += np.sum(dz, axis=0) / grad_next.shape[0]\n\n return dh_prev", "def backward(self, y):\n pass", "def tanh_backward(dA, internal_params):\n Z = internal_params\n Zt=tanh(Z)\n dzp=np.power(Zt,2)\n print(dzp.shape)\n dZ=np.multiply(dzp,dA)\n return dZ\n # raise NotImplementedError", "def move_backward():\n pass", "def backward_pass(self):\r\n # the gradient of cross-entropy on top of softmax is (t-y)\r\n back_output = (self.targets - self.y) / self.y.shape[0]\r\n\r\n for layer in reversed(self.layers):\r\n back_output = layer.backward_pass(back_output)", "def backward_pass(self, delta):\r\n self.d_x = np.dot(delta, self.w.T)\r\n self.d_b = np.matmul(np.ones((1, delta.shape[0])), delta)\r\n self.d_w = np.dot(self.x.T, delta)\r\n return self.d_x", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[input < 0] 
= 0\n return grad_input", "def backward(self, gradient):\n #TODO\n pass", "def backward(self, gradient):\n #TODO\n pass", "def backward(self, grad):\n self.grads[\"w\"] = np.matmul(self.input_data.T, grad)\n self.grads[\"b\"] = np.sum(grad, axis=0)\n return np.matmul(self.input_data.T, grad)", "def affine_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n ########################################################################\n # TODO: Implement the affine backward pass. #\n # Hint: Don't forget to average the gradients dw and db #\n ########################################################################\n\n n = x.shape[0]\n\n dx = dout.dot(w.T)\n dx = np.reshape(dx, x.shape)\n\n dw = (x.reshape(x.shape[:1] + (-1,)).T).dot(dout) / n\n dw = np.reshape(dw, w.shape)\n\n db = np.mean(dout, axis=0, keepdims=False)\n\n ########################################################################\n # END OF YOUR CODE #\n ########################################################################\n return dx, dw, db", "def affine_backward(dout, x, w, b):\n ############################################################################\n # TODO: Implement the affine backward pass. #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n # Get the dimension first\n N = x.shape[0]\n D,M = w.shape\n # As we know dout is upstream derivatives\n raw_dx = np.dot(dout,w.T)\n # We need to reshape raw dx\n dx = raw_dx.reshape(x.shape)\n # We also want to calculate\n new_x = x.reshape(N,D)\n dw = np.dot(new_x.T,dout)\n # Get the bias\n db = np.sum(dout,axis=0)*1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return dx, dw, db", "def backward(ctx, grad_output):\n diff, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input = grad_input + diff\n return grad_input", "def conv_backward_naive(dout, cache):\n dx, dw, db = None, None, None\n #############################################################################\n # TODO: Implement the convolutional backward pass. 
#\n #############################################################################\n x, w, b, conv_param = cache\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n N, F, Hc, Wc = dout.shape\n stride = conv_param['stride']\n\n print(dout.shape)\n print(x.shape)\n print(w.shape)\n\n #dout = np.pad(dout, ((0,0),(0,0),(1,1),(1,1)), mode='constant', constant_values=0)\n xp = np.pad(x, ((0,0),(0,0),(1,1),(1,1)), mode='constant', constant_values=0)\n\n db = np.array([np.sum(dout[:,i,:,:]) for i in xrange(F)])\n dw = np.random.randn(F, C, HH, WW)\n for f in xrange(F):\n for c in xrange(C):\n for hh in xrange(HH):\n for ww in xrange(WW):\n dw[f, c, hh, ww] = np.sum(dout[:, f, :, :] * xp[:, c, hh:H+hh:stride, ww:W+ww:stride])\n\n dx = np.zeros(x.shape)\n dx = np.pad(dx, ((0,0), (0,0), (1,1), (1,1)), mode='constant', constant_values=0)\n for i in xrange(N):\n for hh in xrange(HH):\n for ww in xrange(WW):\n whw = w[:, :, hh, ww].T\n for hc in xrange(Hc):\n for wc in xrange(Wc):\n he = hc * stride + hh\n wi = wc * stride + ww\n dx[i, :, he, wi] += np.sum(whw * dout[i, :, hc, wc], axis=1)\n \n dx = dx[:, :, 1:-1, 1:-1]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx, dw, db", "def BackwardAlg (self, State, Sequence):\n\t\tif not self.Verified:\n\t\t\tprint 'HMM not correctly formed'\n\t\t\treturn []\n\t\tiSeq = [self.Observations.index(obs) if obs in self.Observations else -1 for obs in Sequence]\n\t\tif -1 in iSeq:\n\t\t\tprint iSeq.count(-1),'elements in the sequence are not observations'\n\t\t\treturn []\n\t\tprint Sequence,len(Sequence)\n\n\t\tBackwardMatrix = [[0.0]* len(self.States) for n in range(len(Sequence))]\n\t\tBackwardMatrix[-1] = np.array( [1.0]* len(self.States) )\n\t\tfor ix in range(len(Sequence)-2,-1,-1):\n\t\t\tBackwardMatrix[ix] = np.dot( BackwardMatrix[ix+1] * np.array(self.EPM[iSeq[ix+1]]), self.TPM )\n\n\t\tprint BackwardMatrix\n\t\treturn BackwardMatrix[0]", "def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n activation = []\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n ### Forward propagation\n activation.append(data)\n\n # Hidden layer inputs: (N, Dx) * (Dx, H) -> N x H\n z = np.dot(activation[-1], W1) + b1 \n # Activations, inputs to the final layer. 
\n activation.append(sigmoid(z)) # output of the hidden layer, activation\n # Final layer outputs: ( N x H ) * ( H, Dy) -> (N, Dy)\n z = np.dot(activation[-1], W2) + b2\n activation.append( softmax(z) )\n\n # Cross-entropy cost\n\n y_p = activation[-1]\n activation = activation[:-1] # remove activation data (output)\n\n cost = -np.sum(labels * np.log(y_p))\n \n error = []\n \n ### backward propagation\n sigma = (y_p - labels)\n error.append(sigma)\n\n gradb2 = np.sum(error[-1], axis=0)\n gradW2 = np.dot(activation[-1].T, error[-1])\n\n #\n sigma = np.dot(W2, error[-1].T)\n sigma = sigma.T * sigmoid_grad(activation[-1])\n activation = activation[:-1] # remove activation data ( hidden layer )\n\n error.append(sigma)\n\n gradb1 = np.sum(error[-1], axis=0)\n gradW1 = np.dot(activation[-1].T, error[-1])\n\n\n ### Stack gradients (do not modify)\n grad = np.concatenate((gradW1.flatten(), gradb1.flatten(), \n gradW2.flatten(), gradb2.flatten()))\n \n return cost, grad", "def backward(self, grad_output):\n input, = self.saved_tensors\n grad_input = grad_output.clone()\n grad_input[input < -1] = 0\n grad_input[input > 1] = 0\n return grad_input, None", "def backward(self, input_train, input_train_label):\n batchSize = len(input_train) #liczba obrazow podawanych na wejscie w trakcie jednej iteracji\n weights = self.Weights\n biases = self.Biases\n delta_W = self.delta_W\n delta_B = self.delta_B\n poolParams = self.poolParams\n dW_list = []\n dB_list = []\n dW4 = np.zeros(weights[4].shape)\n dB4 = np.zeros(biases[4].shape)\n dW3 = np.zeros(weights[3].shape)\n dB3 = np.zeros(biases[3].shape)\n dW2 = np.zeros(weights[2].shape)\n dB2 = np.zeros(biases[2].shape)\n dW1 = np.zeros(weights[1].shape)\n dB1 = np.zeros(biases[1].shape)\n dW0 = np.zeros(weights[0].shape)\n dB0 = np.zeros(biases[0].shape)\n loss = 0\n for image in range(batchSize):\n\n X_data = input_train[image]\n X_label = input_train_label[image]\n output_forward, cache = self.forward(X_data) \n loss += -1*sum(X_label - np.log(output_forward)) #obliczenie wartosci funkcji straty [cross entropy]\n\n #Propagacja wsteczna gradientu\n dy = -1*(X_label - output_forward)/2\n #print(\"X_label = {} \\t layer7 = {} \\t dy = {}\".format(X_label, output_forward, dy))\n\n [dy, dW, dB ] = fullycon_b(cache[6], np.asarray([dy]).transpose() , weights[4])\n dW4 += dW\n dB4 += dB.flatten() #wektoryzacja macierzy\n dy = act.relu_b(dy.transpose(), cache[6])\n\n [dy, dW, dB ] = fullycon_b(cache[5][:,0], dy, weights[3])\n dW3 += dW\n dB3 += dB.flatten()\n dy = act.relu_b(dy.transpose(), cache[5][:,0]) \n \n [dy, dW, dB ] = convolution_b(cache[4], dy, weights[2])\n dW2 += dW\n dB2 += dB.flatten()\n \n dy = maxpool_b(cache[3], dy)\n dy = act.relu_b(dy, cache[3])\n\n [dy, dW, dB ] = convolution_b(cache[2], dy, weights[1])\n dW1 += dW\n dB1 += dB.flatten()\n \n dy = maxpool_b(cache[1], dy)\n dy = act.relu_b(dy, cache[1]) \n\n [dy, dW, dB ] = convolution_b(np.asarray([cache[0]]), dy, weights[0])\n dW0 += dW\n dB0 += dB.flatten()\n\t\t\t\n dW_list.append(dW4)\n dB_list.append(dB4)\n dW_list.append(dW3)\n dB_list.append(dB3)\n dW_list.append(dW2)\n dB_list.append(dB2)\n dW_list.append(dW1)\n dB_list.append(dB1)\n dW_list.append(dW0)\n dB_list.append(dB0)\n dW_list = dW_list[::-1]\n dB_list = dB_list[::-1]\n \n #Aktualizacja parametrow kazdej z warstw (o ile takie posiada)\n #uczenie z metoda momentum: learning rate = const; alpha = const\n for x in range(len(dW_list)):\n delta_W[x] = alpha*delta_W[x] - eta*dW_list[x]/batchSize\n weights[x] += delta_W[x]\n delta_B[x] = 
alpha*delta_B[x] - eta*dB_list[x]/batchSize\n biases[x] += delta_B[x]\n #przypisanie nowych wag po aktualiacji wszystkich parametrow\n self.Weights = weights\n self.Biases = biases\n\n #zwrocenie stosunku wartosci f-cji straty do rozmiaru batch'u\n return loss/batchSize", "def backward(cls, grad_out, activated_out):\n new_grad = grad_out.copy()\n new_grad[activated_out == 0] = 0\n return new_grad", "def _backward_pass(Et, Q):\n if not use_numba:\n m, x, y = 1, 0, 2\n n_1, m_1, _ = Q.shape\n new = Q.new\n N, M = n_1 - 2, m_1 - 2\n E = new(N + 2, M + 2).zero_()\n E[N + 1, M + 1] = 1 * Et\n Q[N + 1, M + 1] = 1\n for i in reversed(range(1, N + 1)):\n for j in reversed(range(1, M + 1)):\n E[i, j] = Q[i + 1, j, x] * E[i + 1, j] + \\\n Q[i + 1, j + 1, m] * E[i + 1, j + 1] + \\\n Q[i, j + 1, y] * E[i, j + 1]\n else:\n import collections\n if isinstance(Et, collections.abc.Sequence):\n Et_float = float(Et[0])\n else:\n Et_float = float(Et)\n E = torch.from_numpy(_backward_pass_numba(\n Et_float, Q.detach().cpu().numpy()))\n\n return E", "def linear_backward(dZ, cache):\n A_prev, W, b = cache\n m = A_prev.shape[1]\n\n ### START CODE HERE ### (≈ 3 lines of code)\n dW = (np.dot(dZ, A_prev.T))/m\n db = (np.sum(dZ, axis=1, keepdims=True))/m\n dA_prev = np.dot(W.T, dZ)\n ### END CODE HERE ###\n \n assert (dA_prev.shape == A_prev.shape)\n assert (dW.shape == W.shape)\n assert (db.shape == b.shape)\n \n return dA_prev, dW, db", "def backward(ctx, de, dv):\n e, v, S = ctx.saved_tensors\n n, k = v.shape\n A = S.reshape(n, n)\n\n print('e=', e)\n vt = v.transpose(-2, -1)\n print('vt=', vt)\n print('de=', de)\n print('dv=', dv)\n\n if dv is None:\n A_bar = T.mm(v, T.mm(T.diag(de), vt))\n else:\n vtdv = T.mm(vt, dv)\n print('vtdv=', vtdv)\n F = T.ones_like(vtdv) * e\n F = (F - F.transpose(-2, -1)) ** -1\n F.diagonal().fill_(0)\n\n print('F=',F)\n\n A_bar = T.mm(v, T.mm(T.diag(de) + F * vtdv, vt))\n\n\n for i in range(k):\n break\n for j in range(k):\n if i < j:\n A_bar[i,j] *= 2\n elif i>j:\n A_bar[i,j] *= 0\n A_bar = (A_bar + A_bar.transpose(-2, -1))/2\n print('A_bar=', A_bar) \n return A_bar\n S_bar = A_bar.flatten()\n return S_bar", "def affine_backward(dout, cache):\n #decouple cache.\n x, w, b = cache\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the affine backward pass. 
#\n ###########################################################################\n reshaped_x = np.reshape(x,(int(x.shape[0]),int(np.prod(x.shape) / x.shape[0])))\n db = np.sum(dout,axis = 0)\n dw = reshaped_x.T.dot(dout)\n dx = np.reshape(dout.dot(w.T),x.shape)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def backward(self, out_grad, input):\n raise NotImplementedError", "def propagate_backward(self, h):\n h = h.mm(self.feedbackweights.t())\n if self.feedbackbias is not None:\n h += self.feedbackbias.unsqueeze(0).expand_as(h)\n return self.feedback_activationfunction(h)", "def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n p = dropout_param['p']\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inverted dropout #\n #######################################################################\n dx = dout * mask\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n dx = dout\n return dx", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[torch.abs(input) > 1.001] = 0\n return grad_input", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[torch.abs(input) > 1.001] = 0\n return grad_input", "def backward(self, x_out, x_target):\r\n return 2*(x_out - x_target)", "def backward(self):\n #initiate the gradients\n #print('')\n \n #print('node {} grad {}'.format(self.id, self.gradient))\n #print('node {} times visited : {}/{}'.format(self.id, self.times_visited, self.times_used))\n\n if self.gradient is None:\n self.gradient=np.eye(self.output_dim)\n self.times_visited+=1\n\n \n \n if self.childrens==[]:\n return(self.gradient)\n else:\n self.backward()\n \n else: \n if self.childrens!=[]:\n #we can still going deeper in backprop\n #print(len(self.childrens), ' childrens', str([self.childrens[i]['node'].id for i in range(len(self.childrens))]))\n for child in self.childrens:\n node,jacobian=child['node'], child['jacobian']\n \n new_grad = np.dot(self.gradient, jacobian)\n #print(node.gradient)\n #print(new_grad)\n \n if node.gradient is None:\n node.gradient = new_grad\n else: \n node.gradient += new_grad\n \n node.times_visited+=1\n #print('looking at node {} \\ngradient {}'.format(node.id, node.gradient))\n\n \n if node.times_used ==node.times_visited: \n #print(node.gradient)\n node.backward() \n else:\n #still some computations to perform upwards before going deeped\n #print('node {} visits : {}/{}'.format(node.id, node.times_visited, node.times_used))\n pass", "def linear_activation_backward(dA, cache, activation):\n pass" ]
[ "0.6939449", "0.66526806", "0.658141", "0.65265906", "0.65174395", "0.6487711", "0.64826727", "0.6444466", "0.643047", "0.64284015", "0.64284015", "0.6420275", "0.6388756", "0.6362412", "0.63552135", "0.6349587", "0.630616", "0.6306138", "0.62945306", "0.6274134", "0.62633884", "0.62595814", "0.6252691", "0.6244913", "0.6199603", "0.61976445", "0.6189757", "0.61865175", "0.6178224", "0.6175904", "0.61743224", "0.6171326", "0.6171326", "0.6171326", "0.61679363", "0.6166549", "0.6154761", "0.6134797", "0.61329734", "0.6124344", "0.61102766", "0.61094415", "0.61069775", "0.60673624", "0.6062285", "0.60620546", "0.60469216", "0.6037967", "0.6037967", "0.6037967", "0.6037967", "0.6037967", "0.6037967", "0.6037967", "0.6037967", "0.6037967", "0.6037967", "0.6037967", "0.6037967", "0.6037967", "0.60206014", "0.60195184", "0.6012445", "0.60106754", "0.60070777", "0.60070777", "0.60057324", "0.60017633", "0.5992029", "0.59918386", "0.5989538", "0.5984112", "0.59832025", "0.5981972", "0.5979124", "0.5970495", "0.59677035", "0.59675425", "0.59675425", "0.59663934", "0.5965679", "0.596565", "0.596535", "0.5964696", "0.5958574", "0.595776", "0.5954987", "0.5953847", "0.5947734", "0.5936242", "0.59347326", "0.593396", "0.5922711", "0.59191257", "0.5918728", "0.59123427", "0.5911882", "0.5911882", "0.5909703", "0.59087855", "0.5902142" ]
0.0
-1
Main function to process user input and then generate the description files for each run
def run_main(): parser = argparse.ArgumentParser(description="Scan a run directory and create files to ") parser.add_argument('--run-directory', dest='run_directory', action='store', default='', help='path to directory with xed files to process') args = parser.parse_args(sys.argv[1:]) if not os.path.isdir(args.run_directory): sys.stderr.write("{0} is not a directory, exiting\n".format(args.run_directory)) return 1 run_name = os.path.abspath(args.run_directory) if os.path.basename(run_name): run_name = os.path.basename(run_name) else: run_name = os.path.split(run_name)[0].split('/')[-1] if not os.path.exists('info'): os.mkdir('info') for directory in os.listdir(args.run_directory): if not os.path.isdir(os.path.join(args.run_directory, directory)): continue csv_filename = "info/{0}_{1}_files.csv".format(run_name, directory) entries = glob.glob(os.path.join(args.run_directory, directory, '*.xed')) if len(entries) == 0: continue with open(csv_filename, 'w') as file_obj: csv_writer = csv.writer(file_obj) csv_writer.writerow(['Run', 'Data Set', 'File']) for entry in entries: uri = "srm://ceph-se.osgconnect.net:8443/srm/v2/" + \ "server?SFN=/cephfs/srm/xenon/" + \ entry.replace('/xenon/', '') csv_writer.writerow([run_name, directory, uri])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n args = parse_args()\n if check_args(args):\n read_descriptions(args)\n generate_deletes(args)", "def main():\r\n# Checking if argument was provided\r\n if len(sys.argv) <=1:\r\n print_usage()\r\n sys.exit(1)\r\n \r\n for arg in sys.argv:\r\n # Checking if help was called\r\n if arg == \"-h\" or arg == \"--help\":\r\n print_usage()\r\n sys.exit(1)\r\n \r\n # Checking for verbose mode \r\n if arg == \"-v\" or arg == \"--verbose\":\r\n global verbose_flag\r\n verbose_flag=1\r\n\r\n # Checking for input file\r\n if arg == \"-f\" or arg == \"--file\":\r\n global default_input_path\r\n global default_output_path\r\n default_input_path = sys.argv[2]\r\n default_output_path=default_input_path[:-4] + \"_results.txt\"\r\n\r\n #if arg == \"-u\" or arg == \"--url\":\r\n # input_url = sys.argv[2]\r\n\t \r\n if os.name == \"nt\":\r\n os.system('cls')\r\n else:\r\n os.system('clear')\r\n \r\n process_from_file()", "def main():\n outfile = 'result.txt'\n\n if os.path.exists(outfile):\n os.remove(outfile)\n\n for arg in sys.argv[1:]:\n get_info(arg, outfile)", "def main(argv): \n args = parse_args() \n \n # create output directory\n create_out_dir(args.out)\n # collect all the necessary information (for all scenarios)\n fileinfos = get_fileinfos(args.coll_path, args.namespace, args.xpath, args.out)\n \n if (args.mode == \"all\"):\n process_all(fileinfos, args)\n else:\n process_single(fileinfos, args)\n\n stdout.write(\"Done: the element usage overview has been created\\n\")", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--filepath\", default=None, type=str, required=True, help=\"Path to dataset\")\n parser.add_argument(\"--truncate\", action='store_true', help=\"Truncate the data when enabled\")\n parser.add_argument(\"--stats\", action='store_true', help=\"Get stats for the file\")\n parser.add_argument(\"--count_vocab\", action='store_true', help=\"Get vocabulary count and save vocabulary for the file\")\n ##generation\n parser.add_argument('--generate', action='store_true', help=\"Start the generation\")\n parser.add_argument(\"--temperature\", type=float, default=1.0, help=\"Softmax temperature setting\")\n parser.add_argument(\"--length\", type=int, default=150, help=\"number of words to be generated\")\n parser.add_argument(\"--top_k\", type=int, default=1, help=\"parameter for Top-k sampling\")\n parser.add_argument('--stop_token', type=str, default=None, help=\"Token at which text generation is stopped\")\n parser.add_argument('--num_samples', type=int, default=500, help=\"Number of samples to be generated and compared with\")\n parser.add_argument('--save_dir', default=\"../save/\", type=str, help=\"Path to save the system outputs\")\n parser.add_argument(\"--no_cuda\", action='store_true', help=\"Avoid using CUDA when available\")\n ##evaluation\n parser.add_argument(\"--evaluate\", action='store_true', help=\"Start the evaluation\")\n parser.add_argument(\"--eval_dir\", default='../save/gpt2/', help=\"The path to evaluate the system outputs\")\n parser.add_argument(\"--eval_model\", default='gpt2', help=\"The model name to evaluate the system outputs\")\n parser.add_argument(\"--reading_scores\", action='store_true', help=\"Get the average reading scores\") #OK\n parser.add_argument(\"--content_words\", action='store_true', help=\"Get the normalized mean of content words and stop words\") #OK\n parser.add_argument(\"--ngram_overlap\", action='store_true', help=\"Get the average N gram overlap percentage with the prompt\") #OK\n 
parser.add_argument(\"--sw\", action='store_true', help=\"Do stopword elimination\")\n parser.add_argument(\"--stem\", action='store_true', help=\"Do stemming\")\n parser.add_argument(\"--parse_scores\", action='store_true', help=\"Get the average, skewness and kurtosis of the parses of stories\") \n parser.add_argument(\"--sentemb_sim_scores\", action='store_true', help=\"Get the sentence embedding similarity percentage with the prompt\")\n parser.add_argument(\"--sent_length\", action='store_true', help=\"Get the average sentence length\")\n parser.add_argument(\"--pos_tag_fqd\", action='store_true', help=\"Get POS tag frequency distribution as percentages\")\n parser.add_argument(\"--log_unigm_prob\", action='store_true', help=\"Get the average log unigram probability\")\n # parser.add_argument(\"--coherence_scores\", action='store_true', help=\"Get the average coherence scores\") \n args = parser.parse_args()\n\n\n filepath = args.filepath\n truncate_bool = args.truncate\n stats_bool = args.stats \n vocab_bool = args.count_vocab\n #generation\n generate_bool = args.generate\n temperature = args.temperature\n length = args.length\n top_k = args.top_k\n stop_token = args.stop_token\n num_samples = args.num_samples\n save_dir = args.save_dir\n no_cuda_bool = args.no_cuda\n #evaluation\n evaluate_bool = args.evaluate\n eval_direcpath = args.eval_dir #path to the model folder\n eval_modelname = args.eval_model #name of the model evaluating\n eval_RS = args.reading_scores #evaluate reading scores\n eval_CW = args.content_words #evaluate the percentage of content and stop words\n eval_NG = args.ngram_overlap #evaluate story prompt relatedness scores with ngram overlap pc\n eval_PS = args.parse_scores #evaluate the grammaticality\n eval_SE = args.sentemb_sim_scores #evaluate story prompt relatedness scores\n eval_SL = args.sent_length #evaluate the syntactic complexity\n eval_PF = args.pos_tag_fqd #evaluate the pos-tag frequency distribution as percentages\n eval_RW = args.log_unigm_prob #evaluate the rareword usage scores as mean log unigram probability\n sw = False\n if args.sw:\n sw = True\n stem = False\n if args.stem:\n stem = True\n\n f_prep = FilePreprocessor(filepath) \n if truncate_bool: #required when you are running the code the first time\n f_prep.truncate_stories(num_words=1000)\n if stats_bool:\n num_stories, num_prompts = f_prep.check_num_stories()\n print (num_prompts, num_stories) \n if vocab_bool:\n vocab_counter_prompt, vocab_counter_story = f_prep.make_vocabulary()\n print (\"The vocabulary for the stories: {}\".format(vocab_counter_story))\n print (\"The vocabulary for the prompts: {}\".format(vocab_counter_prompt))\n ##### get the prompt from the file -- done\n ##### get the model type and model file name and path as a dictionary -- done\n ##### for each model type save the prompt, the original story and the generated story with \"temp val\" and \"top k\" val and \"model name\" and \"index of random story prompt selected\" in a file: \"gentext_\"+model_+\"_\"+temperature+\"_\"+top_k+\"_\"+i -- done\n ##### finish the 4 openai gptx models and then move onto xlnet models --done\n if generate_bool:\n # define the pre-trained models offered by huggingface/transformers github: https://github.com/huggingface/transformers for generation\n # Model classes at https://github.com/huggingface/transformers/blob/master/examples/run_generation.py \n if not os.path.exists(save_dir): os.mkdir(save_dir)\n # PT_model_dict = {\"openai-gpt\": [\"openai-gpt\"], \"gpt2\": [\"gpt2\", 
\"gpt2-medium\", \"gpt2-large\", \"distilgpt2\"], \"xlnet\": [\"xlnet-base-cased\", \"xlnet-large-cased\"], \"transfo-xl\": [\"transfo-xl-wt103\"], \"xlm\": [\"xlm-mlm-en-2048\", \"xlm-mlm-ende-1024\", \"xlm-mlm-enfr-1024\", \"xlm-mlm-enro-1024\", \"xlm-mlm-tlm-xnli15-1024\", \"xlm-mlm-xnli15-1024\", \"xlm-clm-enfr-1024\", \"xlm-clm-ende-1024\", \"xlm-mlm-17-1280\", \"xlm-mlm-100-1280\"]}\n PT_model_dict = {\"openai-gpt\": [\"openai-gpt\"], \"gpt2\": [\"gpt2\", \"gpt2-medium\", \"gpt2-large\"], \"xlnet\": [\"xlnet-base-cased\", \"xlnet-large-cased\"], \"transfo-xl\": [\"transfo-xl-wt103\"]}\n # #check values for variables exist\n # assert temperature\n # assert length\n # assert top_k\n print (\"Get the prompts from {} samples in the test set...\".format(num_samples))\n story_files_dict = f_prep.get_art_prp_file()\n story_files_test = story_files_dict['test']\n nums_selected = random.sample(range(len(story_files_test)), num_samples)\n for idx, i in enumerate(nums_selected):\n prompt = (story_files_test[i][0]).replace(\"[ wp ]\", \"\") #remove the tag from the prompt and save it\n story = story_files_test[i][1]\n # print (\"Prompt: {}\".format(prompt))\n # print (\"Original Story: {}\".format(story))\n for k,v in PT_model_dict.items():\n model_type = k\n model_names_list = v\n for model_ in model_names_list:\n print (\"Generating story #{} with model {} ...\".format(idx+1, model_))\n print (\"Selected story prompt: {}\".format(i+1))\n start_time = time.time()\n generated_text = text_generator(model_type=model_type, model_name_or_path=model_, prompt=prompt, padding_text=story[:50], xlm_lang=\"\", length=length, temperature=temperature, top_k=top_k, top_p=0.9, no_cuda=no_cuda_bool, seed=42, stop_token=stop_token, verbose=False)\n time_elapsed = time.time() - start_time\n temp_pc = int(temperature*100)\n filename_ = \"gentext_\"+model_+\"_T\"+str(temp_pc)+\"_k\"+str(top_k)+\"_\"+str(i)+\".txt\"\n with open(os.path.join(save_dir, filename_),'w') as w_f:\n w_f.write(\"Prompt: \" + prompt + \"\\n\")\n w_f.write(\"Original: \" + story + \"\\n\")\n w_f.write(\"Generated: \" + generated_text + \"\\n\")\n w_f.write(\"Time elapsed: \" + str(time_elapsed) + \"\\n\")\n ##### get the directory of the samples by each model --done\n ##### read the files and get the dataframe from each model \n if evaluate_bool:\n print (\"Evaluation for {} model: \".format(eval_modelname))\n eval_modelObj = EvalDQ(eval_direcpath)\n print (\"Reading the samples ...\") \n \n if eval_modelname == \"fusion\":\n df_modelObj = eval_modelObj.read_fusion_output()\n else:\n df_modelObj = eval_modelObj.read_data_strings()\n # print (df_modelObj[\"temp\"].tolist())\n # exit()\n \n temp = set(df_modelObj[\"temp\"].tolist())\n topK = set(df_modelObj[\"topK\"].tolist())\n print (\"The shape of the Dataframe object for model {} is {}:\".format(eval_modelname, df_modelObj.shape))\n print (\"The temperature and k values are: {} and {}:\".format(temp, topK))\n \n if eval_RS:\n print (\"Calculating the Readability scores ... 
\")\n print (\"For the original stories ...\")\n df_modelObj_RS_original = eval_modelObj.get_readability_scores(df_modelObj,\"original\")\n print (\"The mean reading score values for the original files ...\")\n print (df_modelObj_RS_original.mean(axis=0))\n print (\"For the generated stories ...\")\n df_modelObj_RS_generated = eval_modelObj.get_readability_scores(df_modelObj,\"generated\")\n print (\"The mean reading score values for the generated files ...\")\n print (df_modelObj_RS_generated.mean(axis=0))\n \n if eval_CW:\n print (\"Calculating the percentage of content words VS stop words ...\")\n print (\"For the original stories ...\")\n cw_ct_ori, sw_ct_ori = eval_modelObj.count_contentwords(df_modelObj, \"original\")\n mean_cw_ct_ori = statistics.mean(cw_ct_ori) #look at the normalized mean \n mean_sw_ct_ori = statistics.mean(sw_ct_ori)\n print (\"The normalized mean for content words is {} and for stop words is {}\".format(mean_cw_ct_ori, mean_sw_ct_ori))\n print (\"For the generated stories ...\")\n cw_ct_gen, sw_ct_gen = eval_modelObj.count_contentwords(df_modelObj, \"generated\")\n mean_cw_ct_gen = statistics.mean(cw_ct_gen) #look at the normalized mean \n mean_sw_ct_gen = statistics.mean(sw_ct_gen)\n print (\"The normalized mean for content words is {} and for stop words is {}\".format(mean_cw_ct_gen, mean_sw_ct_gen))\n\n if eval_NG:\n print (\"Calculating the Story Prompt Relatedness scores ... \")\n print (\"Calculating the average n-gram overlap with the prompt...\")\n # avg_ngmoverlap_pc_gen = eval_modelObj.ngram_overlap(df_modelObj, (\"generated\", \"prompt\"), n=3)\n # print (\"The average overlap percentage is {}\".format(avg_ngmoverlap_pc_gen))\n print (\"For the original stories ...\")\n for i in [1,2,3]:\n print (\"Getting the average for n={}\".format(i))\n avg_ngmoverlap_pc_ori = eval_modelObj.ngram_overlap(df_modelObj, (\"original\", \"prompt\"), n=i, sw=sw, stem=stem)\n print (\"The average overlap percentage is {}\".format(avg_ngmoverlap_pc_ori))\n print (\"For the generated stories ...\")\n for i in [1,2,3]:\n print (\"Getting the average for n={}\".format(i))\n avg_ngmoverlap_pc_gen = eval_modelObj.ngram_overlap(df_modelObj, (\"generated\", \"prompt\"), n=i, sw=sw, stem=stem)\n print (\"The average overlap percentage is {}\".format(avg_ngmoverlap_pc_gen))\n\n if eval_PS:\n print (\"Calculating the constituency parsing scores ...\")\n print (\"For the original stories ...\")\n _, skew_scores_ori, kurt_scores_ori = eval_modelObj.parsing_score_calculation(df_modelObj, \"original\")\n mean_skew_scores_ori = statistics.mean(skew_scores_ori) #look at the normalized mean \n mean_kurt_scores_ori = statistics.mean(kurt_scores_ori)\n print (\"The mean skewness is {} and kurtosis is {}\".format(mean_skew_scores_ori, mean_kurt_scores_ori))\n print (\"For the generated stories ...\")\n _, skew_scores_gen, kurt_scores_gen = eval_modelObj.parsing_score_calculation(df_modelObj, \"generated\")\n mean_skew_scores_gen = statistics.mean(skew_scores_gen) #look at the normalized mean \n mean_kurt_scores_gen = statistics.mean(kurt_scores_gen)\n print (\"The mean skewness is {} and kurtosis is {}\".format(mean_skew_scores_gen, mean_kurt_scores_gen))\n \n if eval_SE:\n print (\"Calculating the Story Prompt Relatedness scores ... 
\")\n print (\"Calculating the sentence embedding similarity with the prompt...\")\n print (\"For the original stories ...\")\n avg_sentemb_sim_ori = eval_modelObj.word2vec_sentsim(df_modelObj, (\"original\", \"prompt\"))\n print (\"The average sentence embedding similarity is {}\".format(avg_sentemb_sim_ori))\n print (\"For the generated stories ...\")\n avg_sentemb_sim_gen = eval_modelObj.word2vec_sentsim(df_modelObj, (\"generated\", \"prompt\"))\n print (\"The average sentence embedding similarity is {}\".format(avg_sentemb_sim_gen))\n\n if eval_SL:\n print (\"Calculating the average sentence length ...\")\n print (\"For the orginal stories ...\")\n sentlen_list_ori = eval_modelObj.average_sentence_length(df_modelObj, \"original\")\n mean_sentlen_ori = statistics.mean(sentlen_list_ori)\n print (\"The average sentence length is {}\".format(mean_sentlen_ori))\n print (\"For the generated stories ...\")\n sentlen_list_gen = eval_modelObj.average_sentence_length(df_modelObj, \"generated\")\n mean_sentlen_gen = statistics.mean(sentlen_list_gen)\n print (\"The average sentence length is {}\".format(mean_sentlen_gen))\n \n if eval_PF:\n print (\"Calculating the POS tag frequency tag distribution ...\")\n print (\"For the original stories ...\")\n df_modelObj_POS_ori = eval_modelObj.pos_tag_freqdist(df_modelObj, \"original\")\n print (\"The mean POS tag percentages for the original files ...\")\n POS_dict_ori = (df_modelObj_POS_ori.mean(axis=0)).to_dict()\n print (\"NOUN: {} and VERB: {}\".format(POS_dict_ori['NOUN']*100, POS_dict_ori['VERB']*100))\n print (\"For the generated stories ...\")\n df_modelObj_POS_gen = eval_modelObj.pos_tag_freqdist(df_modelObj, \"generated\")\n print (\"The mean POS tag percentages for the generated files ...\")\n POS_dict_gen = df_modelObj_POS_gen.mean(axis=0)\n print (\"NOUN: {} and VERB: {}\".format(POS_dict_gen['NOUN']*100, POS_dict_gen['VERB']*100))\n\n if eval_RW:\n print (\"Calculating the rare word usage metrics ...\")\n print (\"For the generated stories ...\")\n mean_ug_prblst_ori = eval_modelObj.get_rareword_usage(df_modelObj)\n mean_ug_ori = statistics.mean(mean_ug_prblst_ori)\n print (\"The average unigram probability is {}\".format(mean_ug_ori))", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('filename', type=argparse.FileType('r'), nargs='+', help='The list of files to generate strdl documentation for')\n args = parser.parse_args()\n for file in args.filename:\n strdl_gen.generate_file(strdl_parser.parse(file))", "def take_action(self, parsed_args):\n combine(parsed_args.run_desc_file)", "def provide_output():\n args = parser.parse_args()\n #convert args to a dictionary\n args_dict = {arg: value for arg, value in vars(args).items() if value \n is not None} \n #store method into a variable\n method = args_dict.pop('method')\n def perform_operation():\n \"\"\"Function to perform all operations requested by the user\"\"\"\n for k in ['en', 'de', 'fr']:\n inst = named_entity_methods_text(k, method)\n if 'list_all' in args_dict:\n inst.save_all_ne_as_list_to_txt()\n if 'list_different' in args_dict:\n inst.save_different_ne_as_list_to_txt()\n if 'percentage' in args_dict:\n inst.save_percentages_to_txt()\n if 'annotated_txt' in args_dict:\n inst.save_annotated_text_to_txt()\n if 'annotated_xml' in args_dict:\n inst.save_annotated_text_to_xml()\n return\n #if we choose the url option\n if 'url' in args_dict:\n url = args_dict.pop('url')\n url = horizon_url(url)\n #save horizon pages into txt\n url.save_horizon_to_txt()\n #perform 
operations depending on the user input\n perform_operation()\n #if we choose the folder option\n elif 'folder' in args_dict:\n folder = args_dict.pop('folder')\n os.chdir(folder)\n #perform operations depending on the user input\n perform_operation()\n #if we choose the textfile option\n elif 'textfile' in args_dict:\n textfile = args_dict.pop('textfile')\n #initialise counter for folders\n url_nr = 1\n #for every line in the text_file\n for line in textfile:\n #build new directory and move into it\n os.mkdir('url_nr_'+str(url_nr))\n os.chdir('url_nr_'+str(url_nr))\n url = line.replace('\\n', '')\n url = horizon_url(url)\n #save horizon pages into txt\n url.save_horizon_to_txt()\n #perform operations depending on the user input\n perform_operation()\n #update counter for folders\n url_nr += 1\n os.chdir('..')\n elif 'parent_directory' in args_dict:\n parent_directory = args_dict.pop('parent_directory')\n #initialise list for good paths (i.e. the ones containing only txt \n #files)\n good_paths = []\n #all paths\n all_paths = ([x[0] for x in os.walk(parent_directory)])\n for i in all_paths:\n #content of the paths\n content = os.listdir(i)\n #if there is a directory in the folder, then pass. Otherwise, \n #add to list\n for j in content:\n if not j.endswith('txt'):\n pass\n else:\n good_paths.append(i)\n break\n #for every good path\n for i in good_paths:\n #initialise a parameter containing the number of subdirectories \n #of the path\n amount_subdirectories = 1 + i.count('/')\n #go to the directory\n os.chdir(i)\n #perform operations depending on the user input\n perform_operation()\n #come back to the parent directory\n while amount_subdirectories > 0:\n os.chdir('..')\n amount_subdirectories -= 1\n #if no one among url, folder, textfile or parent_directory is provided, \n #return an error and exit\n else: \n raise TypeError('Either -u, -f, -t, or -p must be specified')\n exit(1)", "def main():\n # There are no args, but parse them just so help works\n args = docopt(__doc__)\n print(process_files_json(), end=\"\")\n return None", "def main(args):\n ## Starting time\n start_time = datetime.now()\n ## Reading all elements and converting to python dictionary\n param_dict = vars(args)\n ## Checking for correct input\n param_vals_test(param_dict)\n #\n # Creating instance of `ReadML` with the input parameters\n param_dict['ml_args'] = ReadML(**param_dict)\n ## Program message\n prog_msg = param_dict['Prog_msg']\n # Adding additional parameters\n param_dict = add_to_dict(param_dict)\n ##\n ## Creating Folder Structure\n # proj_dict = cwpaths.cookiecutter_paths(__file__)\n proj_dict = param_dict['ml_args'].proj_dict\n proj_dict = directory_skeleton(param_dict, proj_dict)\n ##\n ## Printing out project variables\n print('\\n'+50*'='+'\\n')\n for key, key_val in sorted(param_dict.items()):\n if key != 'Prog_msg':\n print('{0} `{1}`: {2}'.format(prog_msg, key, key_val))\n print('\\n'+50*'='+'\\n')\n ##\n ## Feature keys\n param_dict['feat_cols_dict'] = param_dict['ml_args'].feat_cols_names_dict(\n return_all=True)\n ##\n ## Reading in the main catalogue\n catl_pd = catl_file_read_clean(param_dict, proj_dict)\n ###\n ### ------ Figures ------ ###\n ##\n ## Comparison of estimated group masses via HAM and Dynamical Masses\n frac_diff_model(param_dict, proj_dict, plot_opt=param_dict['plot_opt'])\n #\n # Covariance Matrix\n covariance_plot(catl_pd, param_dict, proj_dict)\n #\n # Traditional methods for estimating masses\n # pred_masses_halo_mass(param_dict, proj_dict)\n #\n # Fractional Difference 
plots vs True mass of galaxy GROUPS\n # frac_diff_groups_model(param_dict, proj_dict,\n # plot_opt=param_dict['plot_opt'])\n ##\n ## End time for running the catalogues\n end_time = datetime.now()\n total_time = end_time - start_time\n print('{0} Total Time taken (Create): {1}'.format(prog_msg, total_time))", "def main():\n parsed_args = parse_args()\n dfg = DummyFileGenerator(parsed_args[0], **parsed_args[1])\n dfg.write_output_file(**parsed_args[2])", "def main():\n ff = FileForensics()\n # ff.scan_dir(\"/Users/ns/notes\") # FIXME\n ff.scan_dir(\"/Users/ns/work/termination_data\")\n\n print \"\\n--- BIG FILES ---\"\n for (size, mime, filename) in ff.get_big_files():\n print (bcolors.FAIL+\"{:>10} MB\"+bcolors.ENDC+\" {:<20} {:<10}\").\\\n format(size, mime, filename)\n\n print \"\\n--- FOUND KEYWORDS ---\"\n for (file, matches) in ff.get_keyword_files():\n print \"{:<5} {:<20} ({:<10})\".format(\n len(matches), file[\"mime\"], file[\"filename\"])\n for position, match in matches:\n print \"\\t- {:<10} {:<10}\".format(position, match)\n print\n\n print \"\\n--- HIGH ENTROPY FILES ---\"\n for (file, ent) in ff.get_highentropy_files():\n print (bcolors.FAIL+\"\\t {:.2f}\"+bcolors.ENDC+\" ({:<10}) {:<10}\").\\\n format(ent, file[\"mime\"], file[\"filename\"])", "def main():\n parser = argparse.ArgumentParser(description=\"\"\"Tester for YT Data API and different inputs\"\"\")\n parser.add_argument('-a', '--analytics', help='Performs a basic analytics lookup for the user\\'s channel entered')\n parser.add_argument('-c', '--comments', help='Performs a lookup of comments for the video id entered')\n args = parser.parse_args()\n\n if args.analytics:\n analytics = args.analytics\n analyt(analytics)\n\n if args.comments:\n comments = args.comments\n get_comments(comments)", "def Main():\n statistics_types = frozenset([\n u'codereviews', u'codereviews-history', u'contributions'])\n\n argument_parser = argparse.ArgumentParser(description=(\n u'Generates an overview of project statistics of github projects.'))\n\n argument_parser.add_argument(\n u'-c', u'--config', dest=u'config_path', action=u'store',\n metavar=u'CONFIG_PATH', default=None, help=(\n u'path of the directory containing the statistics configuration '\n u'files e.g. 
stats.ini.'))\n\n argument_parser.add_argument(\n u'statistics_type', choices=sorted(statistics_types), action=u'store',\n metavar=u'TYPE', default=None, help=u'The statistics type.')\n\n options = argument_parser.parse_args()\n\n if not options.statistics_type:\n print(u'Statistics type missing.')\n print(u'')\n argument_parser.print_help()\n print(u'')\n return False\n\n config_path = options.config_path\n if not config_path:\n config_path = os.path.dirname(__file__)\n config_path = os.path.dirname(config_path)\n config_path = os.path.join(config_path, u'data')\n\n stats_file = os.path.join(config_path, u'stats.ini')\n if not os.path.exists(stats_file):\n print(u'No such config file: {0:s}.'.format(stats_file))\n print(u'')\n return False\n\n output_writer = StdoutWriter()\n\n if not output_writer.Open():\n print(u'Unable to open output writer.')\n print(u'')\n return False\n\n if options.statistics_type.startswith(u'codereviews'):\n usernames = {}\n with open(stats_file) as file_object:\n stats_definition_reader = StatsDefinitionReader()\n usernames = stats_definition_reader.ReadUsernames(file_object)\n\n include_closed = False\n if options.statistics_type == u'codereviews-history':\n include_closed = True\n\n codereviews_helper = CodeReviewIssuesHelper(include_closed=include_closed)\n codereviews_helper.ListIssues(usernames, output_writer)\n\n elif options.statistics_type == u'contributions':\n projects_per_organization = {}\n with open(stats_file) as file_object:\n stats_definition_reader = StatsDefinitionReader()\n projects_per_organization = (\n stats_definition_reader.ReadProjectsPerOrganization(file_object))\n\n user_mappings = {}\n with open(stats_file) as file_object:\n stats_definition_reader = StatsDefinitionReader()\n user_mappings = stats_definition_reader.ReadUserMappings(file_object)\n\n contributions_helper = GithubContributionsHelper(user_mappings)\n contributions_helper.ListContributions(\n projects_per_organization, output_writer)\n\n # TODO: add support for pull requests\n # TODO: add support for more granular CL information\n\n return True", "def main():\n desc=textwrap.dedent('''\\\n Post processing perf/stap/dtrace profiling data. \n Examples:\n 1. post-kp.py -h\n print usage info\n 2. post-kp.py or post-kp.py -i\n interactive mode. waiting for profiling files to be opened\n interactive mode is usefull when the file is huge since the\n file keeps open and all data has been read in memory, and\n multiple cmds need to be run against the file\n 3. post-kp.py file1\n list the inclusive of functions in sorted order By default, \n only the first 20 functions are displayed. -n can be used to\n specify any numbers. \n 4. post-kp.py file1 file2\n list the inclusive difference of fuctions in sorted order \n between file1 and file2.\n 6. post-kp.py -i file1 file2 file3\n open all files, then wait for subcommand.\n 7. post-kp.py -C fn file1\n list all call stacks to fn. -s is used to specify depth of\n stack to show\n 8. post-kp.py -C fn file1 file2\n list call stack diference to fn between file1 and file2\n 9. post-kp.py -c fn file1\n list all functions fn calls\n 10. 
post-kp.py -f fn file1\n list all expensive instructions in fn\n ''')\n help=textwrap.dedent('''\\\n Usage:\n post-kp.py [-i]\n post-kp.py [-v]\n post-kp.py [-t type] [-n lines] [-i] file1,file2...,filen\n post-kp.py [-Ccf fn] [-s depth] [-n lines] file1, file2\n ''')\n\n parser=argparse.ArgumentParser(description=desc, epilog=help,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\"files\", nargs='*', help=\"input file(s)\")\n parser.add_argument(\"-t\",\"--type\", choices=['in','ex','ina','exa'], default='in')\n parser.add_argument(\"-C\",\"--caller\", help=\"print caller(s) of the function\",\n type=str)\n parser.add_argument(\"-c\",\"--callee\", help=\"print callee(s) of the function\",\n type=str)\n parser.add_argument(\"-f\",\"--func\", help=\"print expensive instrutions in func\",\n type=str)\n parser.add_argument(\"-n\",\"--lines\", help=\"print first n lines\", type=int,\n default=20)\n parser.add_argument(\"-s\",\"--depth\", help=\"print first n lines\", type=int,\n default=1)\n parser.add_argument(\"-i\",\"--interactive\", help=\"interactive mode\",\n action=\"store_true\")\n parser.add_argument(\"-v\",\"--version\", help=\"version\", action=\"store_const\",\n const=version)\n args=parser.parse_args()\n if args.version:\n print(args.version)\n return\n handle=doit(args)\n if args.interactive or not args.files:\n handle.go(args)\n else:\n if args.caller:\n handle.cc(\"caller\",args.caller)\n elif args.callee:\n handle.cc(\"callee\",args.callee)\n elif args.func:\n handle.instruction(args.func)\n else:\n handle.ie(args.type)", "def main():\n p = Path.cwd()\n path = str(p)\n\n files = tracked_files()\n scripts = search_dir(p, path, files, '.py')\n scripts = [i for i in scripts if 'tests/' not in i[:7]]\n scripts = list(map(partial(process, p), scripts))\n\n for script in scripts:\n script['display'] = script['name'].replace('_', '\\_')\n write_readme(scripts)", "def main():\n with open(\"page_data.yaml\", 'r') as inputstr:\n config_data = yaml.safe_load(inputstr)\n ointf = OutputInterface('template.txt')\n table_data = get_song_artist_matches()\n ofilen = config_data['directory'] + os.sep + 'common_songs.html'\n title = 'Song Titles and Band Name Overlap'\n header = ['No.', 'Artist', 'Peak', 'Date', 'Song/Artist', 'Peak',\n 'Date', 'Song']\n ointf.build_page(ofilen, title, header, fmt_table(table_data))\n ointf.inject(XTRAEDIT)\n ointf.output()", "def main(self, verbose=0):\n indepdict=self.scan_for_loop(self.indeploop)\n pegdict1 = self.scan_for_loop(self.pegloop1)\n pegdict2 = self.scan_for_loop(self.pegloop2)\n if len(indepdict.keys()) == 0 and len(pegdict1.keys()) == 0 and len(pegdict2.keys()) == 0:\n return dict()\n alldict = dict(indepdict)\n alldict.update(pegdict1)\n alldict.update(pegdict2)\n indepcomb=self.get_combo_list(indepdict, 0)\n pegcomb1=self.get_combo_list(pegdict1, 1)\n pegcomb2=self.get_combo_list(pegdict2, 1)\n allcombs = self.combine_three_combo_lists(indepcomb, pegcomb1, pegcomb2)\n datasets = self.prepare_looped_datasets(alldict, allcombs)\n createdfiles = self.create_input_files(datasets)\n if verbose == 1:\n self.print_list(indepcomb)\n self.print_list(pegcomb1)\n self.print_list(pegcomb2)\n self.print_list(allcombs)\n for datakey in datasets:\n self.print_list(datasets[datakey])\n return createdfiles", "def main():\n stats.set_time_start()\n\n if config.options.show_progress:\n stats.start_monitor()\n\n recorders = Recorder.launch(config.options.recorders)\n\n try:\n for filename in config.filenames:\n 
parser.parse(filename)\n\n Recorder.wait_empty()\n except KeyboardInterrupt:\n pass\n\n stats.set_time_stop()\n\n if config.options.show_progress:\n stats.stop_monitor()\n\n stats.print_summary()", "def main():\n parser = argparse.ArgumentParser(description=\"\"\"\n Generate a spec file (in YAML) for processing, by reading opDB.\n The spec file can be input to generateCommands.py\n to generate actual commands.\n \"\"\")\n parser.add_argument(\"--detectorMapDir\", type=str, help=\"\"\"\n Directory that contains initial detector maps.\n If you want to inscribe environment variables as environment variables\n in the output file, escape the $ sign when calling this program.\n \"\"\")\n parser.add_argument(\"output\", type=str, help=\"\"\"\n Output file name. Should usually end with \".yaml\".\n \"\"\")\n parser.add_argument(\"-d\", \"--dbname\", type=str, help=\"\"\"\n Database name of opDB. For example, -d \"dbname=opdb host=example.com\".\n \"\"\")\n parser.add_argument(\"--maxarcs\", type=int, default=10, help=\"\"\"\n Max number of arc visits to use for making one detectorMap.\n \"\"\")\n # options for SelectionCriteria follow.\n parser.add_argument(\"--date-start\", type=dateutil.parser.parse, help=\"\"\"\n Choose only those records with `pfs_visit.issued_at >= date_start`.\n \"\"\")\n parser.add_argument(\"--date-end\", type=dateutil.parser.parse, help=\"\"\"\n Choose only those records with `pfs_visit.issued_at < date_end`.\n \"\"\")\n parser.add_argument(\"--visit-start\", type=int, help=\"\"\"\n Choose only those records with `pfs_visit.pfs_visit_id >= visit_start`.\n \"\"\")\n parser.add_argument(\"--visit-end\", type=int, help=\"\"\"\n Choose only those records with `pfs_visit.pfs_visit_id < visit_end`.\n \"\"\")\n parser.add_argument(\"--config\", type=ConfigOverrides.fromDirectory, default=ConfigOverrides(), help=\"\"\"\n Configuration override directory.\n \"\"\")\n args = parser.parse_args()\n args.criteria = SelectionCriteria.fromNamespace(args, remove=True)\n\n if args.dbname is None:\n args.dbname = getDefaultDBName()\n\n generateReductionSpec(**vars(args))", "def main():\n download_insert_title_basics()\n download_insert_title_principals()\n download_insert_name_basics()\n download_insert_title_ratings()\n scrap_keywords()\n create_and_insert_soup()\n return", "def main():\n print \"\\nWelcome to the Bloom lab Reed-Muench calculator.\\n\"\n infile = None\n while not infile:\n infile = raw_input(\"Enter the name of the input file in text format: \").strip()\n if os.path.isfile(infile):\n break\n elif infile in ['Q', 'q']:\n print \"Quitting.\"\n sys.exit()\n else:\n infile = None\n print \"Failed to find the specified input file of %s. 
Try again to enter a valid file name, or enter Q to quit.\" % infile\n print \"Reading input from the file %s.\" % infile\n (samplenames, sampledata, volume, dilution) = ParseInput(infile)\n print \"Read data for %d samples.\" % len(sampledata)\n titers = {}\n for (sample, data) in sampledata.iteritems():\n titers[sample] = Titer(data, volume, dilution)\n print \"\\nHere are the computed titers in TCID50 per ul:\"\n for sample in samplenames:\n print \"%s: %.3f\" % (sample, titers[sample])\n (base, ext) = os.path.splitext(infile)\n outfile = '%s-titers.txt' % base\n print \"\\nNow we will write these titers to the output file %s.\" % outfile\n if AskOverwrite(outfile):\n f = open(outfile, 'w')\n f.write(\"Here are the computed titers in TCID50 per ul.\\n\")\n for sample in samplenames:\n f.write(\"%s:\\t%.3f\\n\" % (sample, titers[sample]))\n f.close()", "def main():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='operation')\n\n pparser = subparsers.add_parser('process',\n description='Processes tweets')\n inputgroup = pparser.add_mutually_exclusive_group()\n inputgroup.add_argument('-f', '--files', nargs='+')\n inputgroup.add_argument('-q', '--queries', nargs='+')\n pparser.add_argument('-wc', '--wordcloud', action='store_true')\n pparser.add_argument('-n', type=int, default=300, dest='count')\n\n gparser = subparsers.add_parser('get',\n description='Gets tweets and writes them into files')\n gparser.add_argument('queries', nargs='+')\n gparser.add_argument('-n', type=int, default=300, dest='count')\n iparser = subparsers.add_parser('interactive',\n description='Manually input messages for evaluation')\n args = parser.parse_args()\n \n \n if args.operation == 'get':\n query_tweets_to_files(args.queries, args.count)\n print(\"Tweets successfully saved to disk\")\n elif args.operation == 'process':\n model, w2v_model = load_models()\n if args.queries:\n tweets = [(query,\n tc.query_tweets(query, args.count)) for query in args.queries]\n else:\n tweets = [(os.path.splitext(os.path.basename(f))[0],\n tc.read_tweets_from_file(f)) for f in args.files]\n analyze_tweets(tweets, model, w2v_model)\n print(\"Images successfully saved to disk.\")\n elif args.operation == 'interactive':\n model, w2v_model = load_models()\n interactive(model, w2v_model)", "def main():\r\n\t# If using the command line, parse it into a dictionary; otherwise save as None\r\n\tif len(sys.argv) > 1:\r\n\t\topt_dict = parse_command_line(sys.argv[1:])\r\n\telse:\r\n\t\topt_dict = None\r\n\t\r\n\t# Get module from user or from command line\r\n\tif opt_dict == None:\r\n\t\tprint(\"===== MENU ===== \"\r\n\t\t\t \"\\n1. make new .lab files (relabel) \"\r\n\t\t\t \"\\n2. reformat existing .lab files (clean) \"\r\n\t\t\t \"\\n3. make a dictionary from existing .lab file text \"\r\n\t\t\t \"\\nPlease enter the number for the option you would like to select:\")\r\n\t\tvalue = input(\"> \")\r\n\t\tassert value in [\"1\", \"2\", \"3\"]\r\n\telse:\r\n\t\tvalue = opt_dict[\"module\"] # stored in opt_dict\r\n\t\r\n\t# For each module, check dictionary and pass on\r\n\tif value == \"1\":\r\n\t\t# get language\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nmake new .lab files (relabel)\")\r\n\t\t\t\r\n\t\t\tprint(\"\\nWhat language are your .lab files in? 
Please use two-character language code\")\r\n\t\t\tprint(\"\\nRecognised codes are en, en-celex, en-timit, fr, de, nl\")\r\n\t\t\tprint(\"\\nIf your language is not listed, please leave this field blank\")\r\n\t\t\tlang = input(\"> \").lower()\r\n\t\telse:\r\n\t\t\tlang = opt_dict[\"lang\"]\r\n\t\t\r\n\t\t# get tab-delimited text files\r\n\t\tif opt_dict == None or opt_dict[\"text file\"] == None:\r\n\t\t\tprint(\"\\nWhat are the tab-delimited text files that contain the lab file text (experiment files)? (must include directory) \"\r\n\t\t\t\t \"\\nYou can drag and drop the files into the Terminal window to fill out this space \"\r\n\t\t\t\t \"\\nWARNING: No individual directory should include a space chacter\"\r\n\t\t\t\t \"\\nIf so, please go back and replace any spaces with underscores\")\r\n\t\t\texp_files = input(\"> \")\r\n\t\t\tif exp_files[-1] == \" \":\r\n\t\t\t\texp_files = exp_files[:-1]\r\n\t\telse:\r\n\t\t\texp_files = opt_dict[\"text file\"]\r\n\t\t\r\n\t\t# get column names\"amli\"\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nWhat are the columns that identify the lab text (id columns of the experiment file)? \"\r\n\t\t\t\t \"\\nSeparate all id columns with an underscore \"\r\n\t\t\t\t \"\\nDefault is: experiment_item_condition Press enter to use default\")\r\n\t\t\texp_cols = str(input(\"> \"))\r\n\t\t\tif exp_cols == \"\":\r\n\t\t\t\texp_cols = \"experiment_item_condition\"\r\n\t\telse:\r\n\t\t\texp_cols = opt_dict[\"columns\"]\r\n\t\t\r\n\t\t# get file directory\r\n\t\tif opt_dict == None or opt_dict[\"file dir\"] == None:\r\n\t\t\tprint(\"\\nWhat is the sound directory? \"\r\n\t\t\t\t \"\\nYou can drag and drop the file into the Terminal window to fill out this space\")\r\n\t\t\tfile_dir = input(\"> \")\r\n\t\t\tfile_dir = name_check(file_dir)\r\n\t\telse:\r\n\t\t\tfile_dir = opt_dict[\"file dir\"]\r\n\t\t\r\n\t\t# get file name format\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nWhat format are the sound file names in? (What are the id columns of the sound file?) \"\r\n\t\t\t\t \"\\nDefault is: experiment_participant_item_condition Press enter to use default\")\r\n\t\t\tformat = str(input(\"> \"))\r\n\t\t\tif format == '':\r\n\t\t\t\tformat = \"experiment_participant_item_condition\"\r\n\t\telse:\r\n\t\t\tformat = opt_dict[\"format\"]\r\n\t\t\r\n\t\t# get old directory name\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nWhat would you like to call the directory for the old lab files? \"\r\n\t\t\t\t \"\\nDefault is: 0_old_labfile_relabel/ Press enter to use default\")\r\n\t\t\told_dir = input(\"> \")\r\n\t\t\tif old_dir == '':\r\n\t\t\t\told_dir = \"0_old_labfile_relabel/\"\r\n\t\t\told_dir = name_check(old_dir)\r\n\t\telse:\r\n\t\t\told_dir = opt_dict[\"old dir\"]\r\n\t\t\r\n\t\t# get dictionary option\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nWould you like to create a dictionary from the lab file text?\")\r\n\t\t\tprint(\"Please answer 'y' or 'yes' if so\")\r\n\t\t\td_choice = input(\"> \").lower()\r\n\t\t\tif d_choice == \"y\" or d_choice == \"yes\":\r\n\t\t\t\tdict = True\r\n\t\t\telse:\r\n\t\t\t\tdict = False\r\n\t\telse:\r\n\t\t\tdict = opt_dict[\"dict\"]\r\n\t\t\r\n\t\t# pass on\r\n\t\trelabel_module(exp_files, exp_cols, file_dir, format, old_dir, lang, dict)\r\n\t\tsys.exit(0)\r\n\t\r\n\telif value == \"2\":\r\n\t\t# get language\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nreformat existing .lab files (clean)\")\r\n\t\t\t\r\n\t\t\tprint(\"\\nWhat language are your .lab files in? 
Please use the two-character language code\")\r\n\t\t\tprint(\"Recognised codes are en, en-celex, en-timit, fr, de, nl\")\r\n\t\t\tprint(\"If your language is not listed, please leave this field blank\")\r\n\t\t\tlang = input(\"> \").lower()\r\n\t\telse:\r\n\t\t\tlang = opt_dict[\"lang\"]\r\n\t\t\r\n\t\t# get file directory\r\n\t\tif opt_dict == None or opt_dict[\"file dir\"] == None:\r\n\t\t\tprint(\"\\nWhat is the sound directory? \"\r\n\t\t\t\t \"\\nYou can drag and drop the file into the Terminal window to fill out this space\")\r\n\t\t\tfile_dir = input(\"> \")\r\n\t\t\tfile_dir = name_check(file_dir)\r\n\t\telse:\r\n\t\t\tfile_dir = opt_dict[\"file dir\"]\r\n\t\t\t\r\n\t\t# get the name of the old directory\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nWhat would you like to call the directory for old lab files? Default is: 0_old_labfile_clean/ Press enter to use default\")\r\n\t\t\told_dir = input(\"> \")\r\n\t\t\tif old_dir == '':\r\n\t\t\t\told_dir = \"0_old_labfile_clean/\"\r\n\t\t\told_dir = name_check(old_dir)\r\n\t\telse:\r\n\t\t\told_dir = opt_dict[\"old dir\"]\r\n\t\t\r\n\t\t# see if the user wants to use a dictionary\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nWould you like to create a dictionary from the lab file text?\")\r\n\t\t\tprint(\"Please answer 'y' or 'yes' if so\")\r\n\t\t\td_choice = input(\"> \").lower()\r\n\t\t\tif d_choice == \"y\" or d_choice == \"yes\":\r\n\t\t\t\tdict = True\r\n\t\t\telse:\r\n\t\t\t\tdict = False\r\n\t\telse:\r\n\t\t\tdict = opt_dict[\"dict\"]\r\n\t\t\r\n\t\t# pass on\r\n\t\tclean_module(file_dir, old_dir, lang, dict)\r\n\t\tsys.exit(0)\r\n\t\t\r\n\telif value == \"3\":\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nmake a dictionary from existing .lab file text\")\r\n\t\t\r\n\t\tif opt_dict == None or opt_dict[\"file dir\"] == None:\r\n\t\t\tprint(\"\\nWhat is the .lab file directory? 
\"\r\n\t\t\t\t \"\\nYou can drag and drop the file into the Terminal window to fill out this space\")\r\n\t\t\tfile_dir = input(\"> \")\r\n\t\t\tfile_dir = name_check(file_dir)\r\n\t\telse:\r\n\t\t\tfile_dir = opt_dict[\"file dir\"]\r\n\t\t\r\n\t\tcreate_dictionary(file_dir)\r\n\t\tsys.exit(0)", "def main():\n ref_seq = {}\n ent_spe_sero = {}\n tag_dict = {\"Contigs_with_VP1\":\"contigs\", \"P1_sequences\":\"p1\",\n \"VP1_sequences\":\"vp1\", \"5UTR_sequences\":\"5utr\", \"3D_sequences\":\"3d\"}\n args = get_arguments()\n # Load query elements\n print(\"Load resume file\")\n (query_dict, classify_list,\n classify_specie_list, serotype_list) = get_query(args.resume_file,\n args.tag,\n args.incomplete)\n print(\"{} descriptions loaded\".format(len(query_dict)))\n # Load specie association\n if args.ent_serotype_file and args.template_seq_file:\n # Load enterovirus serotype\n print(\"Load enterovirus serotype association\")\n ent_spe_sero = load_spe_sero(args.ent_serotype_file)\n # Load template sequence\n print(\"Load template sequence\")\n ref_seq = get_template_sequence(args.template_seq_file, ent_spe_sero)\n # Grab query sequence in the database\n print(\"Load database sequence\")\n sequence_data = get_sequence(query_dict, args.fasta_file)\n print(\"{} sequences loaded\".format(len(sequence_data)))\n # Write the new fasta file\n print(\"Write the new fasta\")\n write_sequence(args.results, sequence_data, query_dict, classify_list,\n tag_dict[args.tag], ref_seq, ent_spe_sero)\n #print(save_association)\n print(\"Write the itol label\")\n write_itol_label(args.itol_dir, sequence_data, query_dict, classify_list,\n tag_dict[args.tag])\n print(\"Write the itol tree color\")\n write_itol_tree_color(args.itol_dir, sequence_data, query_dict, classify_specie_list, serotype_list,\n tag_dict[args.tag])\n print(\"Done\")", "def main():\n\n ############################ variable settings #################################\n parser = argparse.ArgumentParser(description='Run Subtask C of GermEval 2017 Using Pre-Trained Language Model.')\n parser.add_argument('--seed', type=int, default=42, help='Random seed.')\n parser.add_argument('--lang_model', type=str, default='bert-base-german-dbmdz-uncased', help='The pre-trained language model.')\n parser.add_argument('--epochs', type=int, default=4, help='Number of epochs for training.')\n parser.add_argument('--lr', type=float, default=5e-5, help='The learning rate.')\n parser.add_argument('--max_len', type=int, default=256, help='The maximum sequence length of the input text.')\n parser.add_argument('--batch_size', type=int, default=32, help='Your train set batch size.')\n parser.add_argument('--df_path', type=str, default='./data/', help='The data directory.') \n parser.add_argument('--train_data', type=str, default='train_df_cat.tsv', help='The filename of the input train data.')\n parser.add_argument('--dev_data', type=str, default='dev_df_cat.tsv', help='The filename of the input development data.')\n parser.add_argument('--test_data1', type=str, default='test_syn_df_cat.tsv', help='The filename of the first input test data (synchronic).')\n parser.add_argument('--test_data2', type=str, default='test_dia_df_cat.tsv', help='The filename of the second input test data (diachronic).')\n parser.add_argument('--output_path', type=str, default='./output/subtaskC/', help='The output directory of the model and predictions.')\n parser.add_argument(\"--train\", default=True, action=\"store_true\", help=\"Flag for training.\")\n 
parser.add_argument(\"--save_prediction\", default=False, action=\"store_true\", help=\"Flag for saving predictions.\")\n parser.add_argument(\"--save_cr\", default=False, action=\"store_true\", help=\"Flag for saving confusion matrix.\")\n parser.add_argument(\"--exclude_general\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein.\")\n parser.add_argument(\"--exclude_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding neutral polarity.\")\n parser.add_argument(\"--exclude_general_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein:neutral.\")\n args = parser.parse_args()\n ################################################################################\n set_all_seeds(args.seed)\n device, n_gpu = initialize_device_settings(use_cuda=True)\n \n # Load data\n train_df = pd.read_csv(args.df_path + args.train_data, delimiter = '\\t')\n dev_df = pd.read_csv(args.df_path + args.dev_data, delimiter = '\\t')\n test_syn_df = pd.read_csv(args.df_path + args.test_data1, delimiter = '\\t')\n test_dia_df = pd.read_csv(args.df_path + args.test_data2, delimiter = '\\t')\n \n # Create a tokenizer\n lower_case = False\n if args.lang_model[-7:] == \"uncased\":\n lower_case = True\n\n if args.lang_model[:4] == \"bert\":\n model_class = \"BERT\"\n tokenizer = BertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n if args.lang_model[:10] == \"distilbert\":\n model_class = \"DistilBERT\"\n tokenizer = DistilBertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n\n # get training features\n cats = train_df.columns[5:]\n end = \"full\"\n # exclude categories if required\n if (args.exclude_general):\n cats = [i for i in list(cats) if \"Allgemein\" not in i]\n end = \"excl_gen\"\n if (args.exclude_neutral):\n cats = [i for i in list(cats) if \"neutral\" not in i]\n end = \"excl_neu\"\n if (args.exclude_general_neutral):\n cats = [i for i in list(cats) if \"Allgemein:neutral\" not in i]\n end = \"excl_genneu\"\n \n num_labels = len(list(cats))\n\n # create one hot labels\n train_df['one_hot_labels'] = list(train_df[list(cats)].values)\n dev_df['one_hot_labels'] = list(dev_df[list(cats)].values)\n test_syn_df['one_hot_labels'] = list(test_syn_df[list(cats)].values)\n test_dia_df['one_hot_labels'] = list(test_dia_df[list(cats)].values)\n\n # retrieve sentences and labels\n df = pd.concat([train_df, dev_df])\n sentences = df.text.values\n labels = list(df.one_hot_labels.values) \n\n sentences_syn = test_syn_df.text.values\n labels_syn = list(test_syn_df.one_hot_labels.values)\n\n sentences_dia = test_dia_df.text.values\n labels_dia = list(test_dia_df.one_hot_labels.values)\n \n print(\"number of categories:\", len(list(cats)))\n\n # Tokenize all of the sentences and map the tokens to their word IDs. 
\n input_ids = [tokenizer.encode(sent, add_special_tokens=True, truncation=True, \n max_length=args.max_len) for sent in sentences]\n input_ids = pad_sequences(input_ids, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n # Create attention masks\n attention_masks = [[int(token_id > 0) for token_id in sent] for sent in input_ids]\n \n # synchronic test data\n input_ids_syn = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_syn]\n input_ids_syn = pad_sequences(input_ids_syn, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_syn = [[int(token_id > 0) for token_id in sent] for sent in input_ids_syn]\n \n # diachronic test data\n input_ids_dia = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_dia]\n input_ids_dia = pad_sequences(input_ids_dia, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_dia = [[int(token_id > 0) for token_id in sent] for sent in input_ids_dia]\n\n # split train, dev\n train_inputs, train_labels, dev_inputs, dev_labels, train_masks, dev_masks = split_train_dev(\n train_df, dev_df, attention_masks, input_ids, labels)\n \n # transform to torch tensor\n train_inputs = torch.tensor(train_inputs)\n dev_inputs = torch.tensor(dev_inputs)\n\n train_labels = torch.tensor(train_labels)\n dev_labels = torch.tensor(dev_labels)\n\n train_masks = torch.tensor(train_masks)\n dev_masks = torch.tensor(dev_masks)\n\n test_syn_inputs = torch.tensor(input_ids_syn)\n test_syn_masks = torch.tensor(attention_masks_syn)\n test_syn_labels = torch.tensor(labels_syn)\n\n test_dia_inputs = torch.tensor(input_ids_dia)\n test_dia_masks = torch.tensor(attention_masks_dia)\n test_dia_labels = torch.tensor(labels_dia)\n\n # Create the DataLoader\n train_dataloader = create_dataloader(train_inputs, train_masks, \n train_labels, args.batch_size, train = True)\n\n dev_dataloader = create_dataloader(dev_inputs, dev_masks, \n dev_labels, args.batch_size, train = False)\n\n test_syn_dataloader = create_dataloader(test_syn_inputs, test_syn_masks, \n test_syn_labels, args.batch_size, \n train = False)\n\n test_dia_dataloader = create_dataloader(test_dia_inputs, test_dia_masks, \n test_dia_labels, args.batch_size, \n train = False)\n\n # Create model\n if args.train:\n if model_class == \"BERT\":\n config = BertConfig.from_pretrained(args.lang_model, num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = BertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n\n if model_class == \"DistilBERT\":\n config = DistilBertConfig.from_pretrained(args.lang_model, num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = DistilBertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n model.cuda()\n\n\n # Create an optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.0}\n ]\n optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=args.lr,\n eps = 1e-8\n )\n # Total 
number of training steps = number of batches * number of epochs\n total_steps = len(train_dataloader) * args.epochs\n # Create the learning rate scheduler\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=total_steps\n )\n \n # train model\n # Main Loop\n print(\"=================== Train ================\")\n print(\"##### Language Model:\", args.lang_model, \",\", \"learning rate:\", args.lr)\n print()\n\n track_time = time.time()\n # trange is a tqdm wrapper around the normal python range\n for epoch in trange(args.epochs, desc=\"Epoch\"):\n print(\"Epoch: %4i\"%epoch, dt.datetime.now())\n\n model, optimizer, scheduler, tr_loss = train_multilabel(\n train_dataloader=train_dataloader, \n model=model, \n device=device, \n optimizer=optimizer, \n scheduler=scheduler, \n num_labels=num_labels\n )\n # EVALUATION: TRAIN SET\n pred_bools_train, true_bools_train, f1_train = eval_multilabel(\n train_dataloader, model=model, device=device)\n print(\"TRAIN: micro F1 %.3f\"%(f1_train))\n \n # EVALUATION: DEV SET\n pred_bools_dev, true_bools_dev, f1_dev = eval_multilabel(\n dev_dataloader, model=model, device=device)\n print(\"EVAL: micro F1 %.3f\"%(f1_dev))\n \n\n print(\" Training and validation took in total: {:}\".format(format_time(time.time()-track_time)))\n\n # EVALUATION: TEST SYN SET\n pred_bools_syn, true_bools_syn, f1_test_syn = eval_multilabel(\n test_syn_dataloader, model=model, device=device)\n print(\"TEST SYN: micro F1 %.4f\"%(f1_test_syn))\n\n # classification report\n clf_report_syn = classification_report(true_bools_syn, pred_bools_syn, target_names=cats, digits=3)\n print(clf_report_syn)\n\n\n # EVALUATION: TEST DIA SET\n pred_bools_dia, true_bools_dia, f1_test_dia = eval_multilabel(\n test_dia_dataloader, model=model, device=device\n )\n print(\"TEST DIA: micro F1 %.4f\"%(f1_test_dia))\n\n # classification report\n clf_report_dia = classification_report(true_bools_dia, pred_bools_dia, target_names=cats, digits=3)\n print(clf_report_dia)\n \n if args.save_cr:\n pickle.dump(clf_report_syn, open(args.output_path+'clf_report_'+args.lang_model+'_test_syn_'+str(num_labels)+end+'.txt','wb'))\n pickle.dump(clf_report_dia, open(args.output_path+'clf_report_'+args.lang_model+'_test_dia_'+str(num_labels)+end+'.txt','wb'))\n\n\n if args.save_prediction:\n test_syn_df[\"category_pred\"] = pred_bools_syn\n test_dia_df[\"category_pred\"] = pred_bools_dia\n test_syn_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_syn_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")\n test_dia_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_dia_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")", "def main(args=None):\n if args is None:\n parser = create_parser()\n args = parser.parse_args()\n\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n input_module = input_mapping[args.input_reader]\n output_module = output_mapping[args.output_format]\n\n templates = []\n # Load templates from external folder if set.\n if args.template_folder:\n templates += read_templates(os.path.abspath(args.template_folder))\n\n # Load internal templates, if not disabled.\n if not args.exclude_built_in_templates:\n templates += read_templates()\n output = []\n for f in args.input_files:\n res = extract_data(f.name, templates=templates, input_module=input_module)\n if res:\n 
logger.info(res)\n output.append(res)\n if args.copy:\n filename = args.filename.format(\n date=res['date'].strftime('%Y-%m-%d'),\n invoice_number=res['invoice_number'],\n desc=res['desc'],\n )\n shutil.copyfile(f.name, join(args.copy, filename))\n if args.move:\n filename = args.filename.format(\n date=res['date'].strftime('%Y-%m-%d'),\n invoice_number=res['invoice_number'],\n desc=res['desc'],\n )\n shutil.move(f.name, join(args.move, filename))\n f.close()\n\n if output_module is not None:\n output_module.write_to_file(output, args.output_name, args.output_date_format)", "def main():\n parser = argparse.ArgumentParser(description=MAIN_DESCRIPTION)\n parser.add_argument('-a', '--algorithm', help=ALGORITHM_DESCRIPTION)\n parser.add_argument('-n', '--number', type=int, help=NUMBER_DESCRIPTION)\n parser.add_argument('-o', '--order', help=ORDER_DESCRIPTION)\n parser.add_argument('-s', '--size', help=SIZE_DESCRIPTION)\n args = parser.parse_args()\n try:\n if not (args.algorithm and args.number and args.order and args.size):\n raise ValueError\n create_structure()\n try:\n data = get_data(args.number, args.order, args.size)\n except IOError:\n data = generate_in_files(args.number, args.order, args.size)\n finally:\n alg, out, time = sorting_algorithm(data, args.algorithm)\n # generate_out_files(out, args.number)\n generate_log_file(args.algorithm, args.number, args.order,\n args.size, alg.compares, alg.moves, time)\n except (TypeError, UnboundLocalError, ValueError) as e:\n parser.print_help()", "def main():\n\t\n\tglobal debug\n\tct = 0\n\tfor opt in sys.argv[1:]:\n\t\tif opt[0] != \"-\": break\n\t\tct = ct + 1\n\t\tif opt == \"-d\": debug = True\n\tif len(sys.argv) < 2+ct:\n\t\tprint (\"Usage: %s filename\" % sys.argv[0])\n\t\treturn\n\tparse(\"\".join(mklines(sys.argv[1+ct])))\n\treturn", "def main():\r\n \r\n # Set verbose if -v option is given as argument.\r\n if len(sys.argv) >= 3:\r\n if sys.argv[2] == '-v':\r\n print('Verbose Mode Activated\\n')\r\n verbose = True\r\n \r\n # Open the file given as argument in read-only mode.\r\n filehandle = open(sys.argv[1], 'r')\r\n textinput = filehandle.read()\r\n # print('\\n-----------INPUT TEXT-------------\\n')\r\n print(textinput,'\\n')\r\n # print('\\n-----------INPUT END---------------\\n')\r\n \r\n # Send the content of text file as string to function parse()\r\n questions, answers = extractQuestions(textinput, 10)\r\n # print(\"Questions \\n ----------------\")\r\n # print(questions)\r\n # print(\"\\n Answers \\n ----------------\")\r\n # print(answers)", "def main():\n\tparser = argparse.ArgumentParser(\n\t\tusage = '%(prog)s [OPTIONS] [ARGS...]',\n\t\tdescription='Calculate something',\n\t\tepilog='Contact simon.clematide@uzh.ch'\n\t\t)\n\tparser.add_argument('--version', action='version', version='0.99')\n\tparser.add_argument('-l', '--logfile', dest='logfile',\n\t\t\t\t\t\thelp='write log to FILE', metavar='FILE')\n\tparser.add_argument('-q', '--quiet',\n\t\t\t\t\t\taction='store_true', dest='quiet', default=False,\n\t\t\t\t\t\thelp='do not print status messages to stderr')\n\tparser.add_argument('-d', '--debug',\n\t\t\t\t\t\taction='store_true', dest='debug', default=False,\n\t\t\t\t\t\thelp='print debug information')\n\tparser.add_argument('-s', '--lm_dir',\n\t\t\t\t\t\taction='store', dest='lm_dir', default='resources.d/taggers/language-model/',\n\t\t\t\t\t\thelp='directory where LMs are stored %(default)')\n\tparser.add_argument('-i', '--iob_dir',\n\t\t\t\t\t\taction='store', dest='iob_dir', 
default='data.d/quaero/quaero_iob',\n\t\t\t\t\t\thelp='directory where iob training material is located %(default)')\n\tparser.add_argument('-t', '--tagger_dir',\n\t\t\t\t\t\taction='store', dest='tagger_dir', default='resources.d/taggers',\n\t\t\t\t\t\thelp='directory where to store training output %(default)')\n\tparser.add_argument('-n', '--ner_cycle',\n\t\t\t\t\t\taction='store', dest='ner_cycle', default='ner',\n\t\t\t\t\t\thelp='ner experiment cycle %(default)')\n\tparser.add_argument('-c', '--correction_mode',\n\t\t\t\t\t\taction='store', dest='correction_mode', default='raw',\n\t\t\t\t\t\thelp='correction mode of the NEs in training data %(default)')\n\tparser.add_argument('-m', '--lm_domain',\n\t\t\t\t\t\taction='store', dest='lm_domain', default='pressfr',\n\t\t\t\t\t\thelp='character level language model domain %(default)')\n\tparser.add_argument('-p', '--train_patience',\n\t\t\t\t\t\taction='store', dest='train_patience', type=int, default=3,\n\t\t\t\t\t\thelp='training patience %(default)')\n\tparser.add_argument('-W', '--use_wiki_wordemb',\n\t\t\t\t\t\taction='store_true', dest='use_wiki_wordemb', default=False,\n\t\t\t\t\t\thelp='use pre-trained wiki word embeddings')\n\tparser.add_argument('-P', '--use_press_wordemb',\n\t\t\t\t\t\taction='store_true', dest='use_press_wordemb', default=False,\n\t\t\t\t\t\thelp='use indomain press word embeddings')\n\tparser.add_argument('-C', '--use_crf',\n\t\t\t\t\t\taction='store_true', dest='use_crf', default=False,\n\t\t\t\t\t\thelp='use CRF layer')\n\tparser.add_argument('args', nargs='*')\n\toptions = parser.parse_args()\n\tif options.logfile:\n\t\tlogging.basicConfig(filename=logfile)\n\tif options.debug:\n\t\tlogging.basicConfig(level=logging.DEBUG)\n\n\ttrain_tagger(options)", "def IO():\n parser=argparse.ArgumentParser(description='PACKMAN: PACKing and Motion ANalysis. (https://github.com/Pranavkhade/PACKMAN)\\n\\nFollowing Apps Available: \\n1. hinge \\n2. hdanm\\n3. entropy\\n4. dci\\n\\nHow to run an app: python -m packman <app name>\\nExample: python -m packman hinge', formatter_class=argparse.RawTextHelpFormatter )\n subparsers = parser.add_subparsers(dest='command')\n\n #Hinge Prediction\n hinge_app_io = subparsers.add_parser('hinge')\n hinge_app_io.add_argument('-pdbid','--pdbid', metavar='PDB_ID', type=str, help='If provided, the PBD with this ID will be downloaded and saved to FILENAME.')\n hinge_app_io.add_argument('alpha', metavar='AlphaValue', help='Recommended: 2.8 for closed; 4.5 for open form, Please refer to the paper for more details')\n hinge_app_io.add_argument('filename', metavar='FILENAME', help='Path and filename of the PDB file.')\n hinge_app_io.add_argument('--e_clusters',metavar='NumberOfEccentricityClusters',type=int,default=4,help='Recommended: 4, Please refer to the paper for more details')\n hinge_app_io.add_argument('--minhnglen',metavar='MinimumHingeLength',type=int,default=5,help='Recommended: 5, Please refer to the paper for more details')\n hinge_app_io.add_argument(\"--chain\", help='Enter The Chain ID')\n hinge_app_io.add_argument('--generateobj', type=argparse.FileType('wb', 0), help='Path and filename to save the .obj file at. 
Ignored unless --chain is provided.')\n\n #hdanm\n hd_anm_io = subparsers.add_parser('hdanm')\n hd_anm_io.add_argument('-pdbid','--pdbid', metavar='PDB_ID', type=str, help='If provided, the PBD with this ID will be downloaded and saved to FILENAME.')\n hd_anm_io.add_argument('filename', metavar='FILENAME', help='Path and filename of the PDB file.')\n hd_anm_io.add_argument('hngfile', metavar='HNG', help='Path and filename of the corresponding HNG file.')\n hd_anm_io.add_argument(\"--chain\", help='Enter The Chain ID')\n hd_anm_io.add_argument(\"--dr\", type=float, default=15, help='Distance cutoff for the ANM.')\n hd_anm_io.add_argument(\"--power\", type=float, default=0, help='Power of the distance in non-parametric ANM.')\n hd_anm_io.add_argument(\"--mass\", default='residue', help='Mass of the residue; unit or molecular weight')\n hd_anm_io.add_argument(\"--scale\", type=int, default=2, help='movie scale')\n hd_anm_io.add_argument(\"--frames\", type=int, default=10, help='number of frames')\n hd_anm_io.add_argument(\"--modes\", type=int, default=10, help='how many modes')\n hd_anm_io.add_argument(\"--ca_to_aa\", action=argparse.BooleanOptionalAction, type=bool, default=False, help='Project CA motion on all atoms.')\n hd_anm_io.add_argument(\"--make_tar\", action='store_true', help='package output files into a tar.gz file')\n\n #Entropy\n entropy_app_io = subparsers.add_parser('entropy')\n entropy_app_io.add_argument('-type','--type', metavar='entropy_type', type=str, help='Provide the Entropy type (Options: 1. PackingEntropy)')\n entropy_app_io.add_argument('-pdbid','--pdbid', metavar='PDB_ID', type=str, help='If provided, the PBD with this ID will be downloaded and saved to FILENAME.')\n entropy_app_io.add_argument('filename', metavar='FILENAME', help='Path and filename of the PDB file.')\n entropy_app_io.add_argument('--chains',metavar='Chains to be used for the entropy calculation',type=str,default=None, help='Recommended: None. Chain IDs for the Entropy calculation (None means all the chains are included; single string means only one chain ID; multiple chains should be comma separated).')\n entropy_app_io.add_argument('--probe_size',metavar='Size surface probe radius',type=float,default=1.4, help='Recommended: 1.4 (radius of a water molecule), Please refer to the paper for more details')\n entropy_app_io.add_argument('--onspherepoints',metavar='Number of points on a sphere',type=int,default=30, help='Recommended: 30. Number of points to be generated around each point for the surface (Read the Publication for more details)')\n\n #DCI\n dci_app_io = subparsers.add_parser('dci')\n dci_app_io.add_argument('-pdbid','--pdbid', metavar='PDB_ID', type=str, help='If provided, the PBD with this ID will be downloaded and saved to FILENAME.')\n dci_app_io.add_argument('filename', metavar='FILENAME', help='Path and filename of the PDB file.')\n dci_app_io.add_argument('-chain','--chain', help='Enter The Chain ID', default=None)\n dci_app_io.add_argument('-cutoff','--cutoff', type=float, help='Enter the cutoff for DCI. (Read the Publication for more details)', default=7.0)\n dci_app_io.add_argument('-n_com','--n_com', type=int, help='Enter the number of communities. 
(Read the Publication for more details)', default=None)\n\n # web server parameters\n web_server_group = parser.add_argument_group('Web server parameters', 'Used by the web form')\n web_server_group.add_argument('--outputfile', type=argparse.FileType('w', 1), default=sys.stdout, help='Path and filename write output to')\n web_server_group.add_argument('--logfile', type=argparse.FileType('w', 1), default=sys.stderr, help='Path and filename write log messages to')\n \n args=parser.parse_args()\n return args", "def main():\n args = utils.read_arguments(__doc__)\n documents = []\n filenames = list(traverse_directory(args[\"input_dirpath\"],'*clean*.txt'))\n labels_dirname = args[\"labels_dirpath\"]\n labels_from_json = get_all_labels_from_json(labels_dirname)\n for filename in tqdm(filenames):\n with AnnotatedIBMFactory(filename) as instance_extractor:\n filename_key = filename.split(\"/\")[-1]\n document = instance_extractor.build_document(\n labels_from_json[filename_key])\n documents.append(document)\n utils.pickle_to_file(documents, args['output_file'])", "def main(argv):\n\n # test strings\n input_strings = [\n \"Foursemestersofchemistry(CHM111/113,CHM112/114,CHM211/213,andCHM212-/214),twosemestersofphysics (PCS201/20\" +\n \"3andPCS202/204) andmathematicsthroughcalculusI(MAT160)arerequired.\",\n \"Mathematicsmajorsarerequiredtocomplete:MAT 160, MAT 260, andMAT 360(CalculusI, II, andIII:12credits), MAT\" +\n \" 302 (LinearAlgebra:3credits), plusatleastfouradditionalupperlevelmathematicscourses(atleast12credits) and\" +\n \"CSC110 (IntroductiontoComputerProgramming:2-4 credits),or MAT151I(Computer ApplicationsforScienceand Mathem\" +\n \"atics: 3 credits).EachstudentmustalsocompleteaJunior Seminar (MAT398/399:2 credits)andSenior Experience(MAT4\" +\n \"98/499: 4 credits)inmathematics. \",\n \"EQUALEDUCATIONALANDEMPLOYMENTOPPORTUNITYPOLICY\",\n \"Caldwell, New Jersey, has seen\\n many changes over the years and was recently ranked by NJMonthly Magazine \" +\n \"as one of the best places to live in all of New Jersey. A short walk brings students to the revitalized \" +\n \"center of town, where shops, a movie theater, wi-fi hot spots andlotsoftastyrestaurantsprovidegreatoptions\" +\n \"forfun. Manyshopsparticipateinadiscount program for Caldwell University students, making the town affordable\" +\n \" on student budgets.\",\n \"If there is anaturaldisaster that interrupts a student’slong-termparticipationinacourse(s),CarlowUniversity\" +\n \"willapprise students oftheoptionsavailabletocompletetheiracademic coursework. \",\n \"PREREQUISITES:BSM103 ANDBSM226. \",\n \"Continuum and atomistic descriptions of diffusion in solids. Reactionsinvolving surfaces and interfaces, in\" +\n \"cluding evaporation, adsorption,grain growth, and coarsening. Phase transformation kinetics, includingnucle\" +\n \"ation, growth, solidification, spinodal decomposition, and martensitictransformations. Analysis of systems \" +\n \"with multiple kinetic mechanisms(typical examples include oxidation, crystal growth, and sintering).Prerequ\" +\n \"isite: background in basic thermodynamics. 
Recommended:ENGN 1410 or 2410 or equivalent.\",\n \"poststructuralistsmisusethewordduetoNewton'sthirdlaw\"]\n\n for input_string in input_strings:\n processed = punct_split(input_string)\n\n print(input_string)\n print(processed)\n print()", "def main():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='mode')\n\n # Add sub-parser for feature extraction\n parser_extract = subparsers.add_parser('extract')\n parser_extract.add_argument('dataset',\n choices=['training', 'validation', 'test'],\n )\n\n # Add sub-parser for training\n subparsers.add_parser('train')\n\n # Add sub-parser for inference\n parser_predict = subparsers.add_parser('predict')\n parser_predict.add_argument('dataset',\n nargs='?',\n choices=['validation', 'test'],\n default='test',\n )\n\n # Add sub-parser for evaluation\n parser_evaluate = subparsers.add_parser('evaluate')\n parser_evaluate.add_argument('task',\n nargs='?',\n choices=['tagging', 'sed', 'all'],\n default='all',\n )\n parser_evaluate.add_argument('dataset',\n nargs='?',\n choices=['validation', 'test'],\n default='test',\n )\n parser_evaluate.add_argument('--thresholds', action='store_true')\n\n args = parser.parse_args()\n\n if args.mode == 'extract':\n extract(cfg.to_dataset(args.dataset))\n elif args.mode == 'train':\n train()\n elif args.mode == 'predict':\n predict(cfg.to_dataset(args.dataset))\n elif args.mode == 'evaluate':\n eval_all = args.task == 'all'\n dataset = cfg.to_dataset(args.dataset)\n if args.task == 'tagging' or eval_all:\n evaluate_audio_tagging(dataset, args.thresholds)\n if args.task == 'sed' or eval_all:\n evaluate_sed(dataset)", "def main():\n global GOLIVE # If False, it's a dry run only\n global PROJECT_ROOT\n global CAD_SOURCE\n global REVIT_SOURCE\n global GENERIC_SOURCE\n global FOLDER_LIST\n global logger\n\n logger = logging.getLogger('__name__')\n stream_handler = logging.StreamHandler()\n logger.addHandler(stream_handler)\n logger.setLevel(logging.INFO)\n\n logger.debug(sys.argv)\n parser = argparse.ArgumentParser(description='Create a project')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-i', action='store_true', help=\"Show INFO messages\")\n group.add_argument('-d', action='store_true', help=\"Show DEBUG messages\")\n parser.add_argument('-t', action='store_true', help='Test: dry run only')\n parser.add_argument('-r', help=\"Set root directory\")\n parser.add_argument('project_data', nargs='+', help=\"<num>%,<name>%<type>\")\n\n args = parser.parse_args(sys.argv[1:])\n logger.debug(args)\n if args.i:\n logger.info('Setting logging level to INFO')\n logger.setLevel(logging.INFO)\n elif args.d:\n logger.info('Setting logging level to DEBUG')\n logger.setLevel(logging.DEBUG)\n if args.t:\n GOLIVE = False\n logger.info('Dry run...')\n if args.r:\n PROJECT_ROOT = args.r\n logger.info(f'Setting PROJECT_ROOT to {args.r}')\n\n CAD_SOURCE = os.path.join(PROJECT_ROOT, 'Templates', 'CAD_Template')\n REVIT_SOURCE = os.path.join(PROJECT_ROOT, 'Templates', 'Revit_Template')\n GENERIC_SOURCE = os.path.join(PROJECT_ROOT,\n 'Templates', 'Generic_Template')\n FOLDER_LIST = os.listdir(PROJECT_ROOT)\n project_info = ' '.join(args.project_data) # The parser split at spaces\n logger.debug(f'Project info: {project_info}')\n project_info = project_info.split('%') # Divide it into our 3 fields\n project_number, project_name, project_type = project_info\n assert project_type in ['Revit', 'CAD', 'Generic']\n\n if checkNewProject(project_number, project_name): # Sanity checks\n success = 
createProject(project_number, project_name, project_type)\n if success:\n logger.info(f'Created project {project_number} {project_name}')\n else:\n logger.error('Project creation failed.')", "def main():\n # Description string will show up in help.\n # Here is how you reuse the doc string from above.\n DESC_STR = main.__doc__\n # Create an epilogue string to further describe the input file\n # The epilogue will show up at the bottom of the help\n EPL_STR = \"\"\"More info shown at the bottom of the help.\"\"\"\n\n # **** argument parsing\n # define the arguments\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=DESC_STR,\n epilog=EPL_STR,\n )\n parser.add_argument(\"posArg1\", help=\"Positional Argument 1\")\n parser.add_argument(\"posArg2\", help=\"Positional Argument 2\")\n # degree must be an integer >= 1\n parser.add_argument(\n \"--degree\",\n default=1,\n type=intDegree,\n metavar=\"\",\n help=\"Polynomial degree used to \\\n curve fit the data. Default value is 1 for linear curve fit.\",\n )\n parser.add_argument(\n \"-c\",\n \"--configFile\",\n default=\"config.ini\",\n metavar=\"\",\n help=\"Configuration file. Default is config.ini.\",\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n default=False,\n help=\"Verbose output, usually used for troubleshooting.\",\n )\n parser.add_argument(\n \"-optArg1\",\n \"--optionalArgument1\",\n default=None,\n metavar=\"\",\n help=\"Optional Argument 1\",\n )\n parser.add_argument(\n \"-optArg2\",\n \"--optionalArgument2\",\n default=\"optArg2\",\n metavar=\"\",\n help='Optional Argument 2. How to escape a special \\\n character (\", \").How to escape the %% character.',\n )\n parser.add_argument(\n \"-optTFArg1\",\n action=\"store_true\",\n default=False,\n help=\"True/False argument set to default to False, and is \\\n set to True if the argument is specified. The argument takes no values.\",\n )\n # add a mutually exclusive required group\n typegroup = parser.add_mutually_exclusive_group(required=True)\n typegroup.add_argument(\n \"-me1\", action=\"store_true\", default=False, help=\"Mutually Exclusive choice 1\"\n )\n typegroup.add_argument(\n \"-me2\", action=\"store_true\", default=False, help=\"Mutually Exclusive choice 2\"\n )\n typegroup.add_argument(\n \"-me3\", action=\"store_true\", default=False, help=\"Mutually Exclusive choice 3\"\n )\n # parse the arguments\n args = parser.parse_args()\n\n # At this point, the arguments will be:\n # Argument Type Default Notes\n # args.posArg1 string\n # args.posArg2 string\n # args.degree integer >= 1\n # args.configFile string 'config.ini'\n # args.verbose True/False default False\n # args.optionalArgument1 string\n # args.optionalArgument2 string\n # args.optTFArg1 True/False\n # args.me1 True/False\n # args.me2 True/False\n # args.me3 True/False\n\n # Put the begin mark here, after the arg parsing, so argument problems are\n # reported first.\n print(\"**** Begin Processing ****\")\n # get start processing time\n procStart = datetime.now()\n print(\" Process start time: \" + procStart.strftime(\"%m/%d/%Y %H:%M:%S\"))\n\n # bring in config data from config.ini by default or from file specified\n # with -c argument\n config = configparser.ConfigParser()\n cfgFile = config.read(args.configFile)\n # bail out if no config file was read\n if not cfgFile:\n print(\n \"\\nERROR: The configuration file: \"\n + args.configFile\n + \" was not found. 
Exiting.\"\n )\n quit()\n # if we get here, we have config data\n if args.verbose:\n print(\"\\nThe config file(s) used are:\")\n print(cfgFile)\n print(\"\\nThe resulting configuration has these settings:\")\n for section in config:\n print(section)\n for key in config[section]:\n print(\" \", key, \":\", config[section][key])\n\n if args.verbose:\n print(\"\\nThe following arguments were parsed:\")\n print(args)\n\n # Process the arguments\n if args.posArg1 is not None:\n print(args.posArg1)\n else:\n # arg is none, so print a message.\n # Not actually possible, since this is a positional argument.\n # Included here so we can see how to process arguments.\n print(\"No value for posArg1.\")\n\n if args.posArg2 is not None:\n print(args.posArg2)\n else:\n # arg is none, so print a message.\n # Not actually possible, since this is a positional argument.\n # Included here so we can see how to process arguments.\n print(\"No value for posArg2.\")\n\n if args.degree is not None:\n print(args.degree)\n else:\n # arg is none, so print a message.\n print(\"No value for degree.\")\n\n if args.optionalArgument1 is not None:\n print(args.optionalArgument1)\n else:\n # arg is none, so print a message.\n print(\"No value for optArg1.\")\n\n if args.optionalArgument2 is not None:\n print(args.optionalArgument2)\n else:\n # arg is none, so print a message.\n print(\"No value for optArg1.\")\n\n if args.optTFArg1 is not None:\n print(args.optTFArg1)\n else:\n # arg is none, so print a message.\n print(\"No value for optTFArg1.\")\n\n if args.me1 is not None:\n print(args.me1)\n else:\n # arg is none, so print a message.\n print(\"No value for me1.\")\n\n if args.me2 is not None:\n print(args.me2)\n else:\n # arg is none, so print a message.\n print(\"No value for me2.\")\n\n if args.me3 is not None:\n print(args.me3)\n else:\n # arg is none, so print a message.\n print(\"No value for me3.\")\n\n # get end processing time\n procEnd = datetime.now()\n print(\"\\n**** End Processing ****\")\n print(\" Process end time: \" + procEnd.strftime(\"%m/%d/%Y %H:%M:%S\"))\n print(\" Duration: \" + str(procEnd - procStart) + \"\\n\")", "def main(args):\n input_file = args[1]\n output_occupations = args[2]\n output_states = args[3]\n\n print(\"Analyzing input file:\")\n summary = process_data.Summary(input_file)\n print(\"Reading input data\")\n summary.read_file()\n\n print(\"Computing summaries\")\n occupations = summary.get_results(input_format.Concept.SOC_NAME)\n states = summary.get_results(input_format.Concept.WORK_STATE)\n\n print(\"Writing results\")\n occupations.to_file(output_occupations)\n states.to_file(output_states)", "def main():\n\n file_name_base = \"./lab-record/result/fairness/\"\n scenarios = ['lan', 'wan1', 'wan2']\n scenario = scenarios[2]\n\n algorithms = [\"bbr\", \"scalable\", \"bic\", \"highspeed\", \"htcp\", \"hybla\",\n \"illinois\", \"vegas\", \"yeah\"]\n names = [\"BBR\", \"Scalable\", \"BIC\", \"High Speed\",\n \"H-TCP\", \"Hybla\", \"Illinois\", \"Vegas\", \"YeAH\"]\n\n test_types = [\"vs_reno\", \"vs_cubic\", \"vs_itself\"]\n\n fsize = 36\n \n index_reno = []\n index_cubic = []\n index_itself = []\n\n data = []\n \n print 'Loadint statistics for ' + file_name_base + '/' + scenario\n\n for algorithm in algorithms:\n for test in test_types:\n path_base = file_name_base + \"/\" + scenario + \"/\" + test + \"/\" + \\\n algorithm + \"/\"\n if test == \"vs_itself\":\n exp_name = names[algorithms.index(algorithm)] + \"_1\"\n con_name = names[algorithms.index(algorithm)] + \"_2\"\n print 
path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \"_1.log\"\n con_filename = \"/\" + algorithm + \"_2.log\"\n process(path_base, exp_filename, con_filename, index_itself)\n if test == \"vs_reno\":\n exp_name = names[algorithms.index(algorithm)]\n con_name = \"Reno\"\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \".log\"\n con_filename = \"/reno.log\"\n process(path_base, exp_filename, con_filename, index_reno)\n if test == \"vs_cubic\":\n con_name = \"CUBIC\"\n exp_name = names[algorithms.index(algorithm)]\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \".log\"\n con_filename = \"/cubic.log\"\n process(path_base, exp_filename, con_filename, index_cubic)\n\n size = 9\n x = numpy.arange(size)\n\n total_width, n = 1.2, 2.5\n width = 1.0 / n\n x = x - (total_width - width) / 2\n\n for i in range(0, len(x)):\n x[i] += 0.5 * i\n\n # Exp\n fig = plt.figure()\n\n # Con\n con_reno = plt.bar(x + 0 * width - 1.2,\n index_reno,\n width=width,\n label='Against Reno',\n alpha=0.5,\n color=\"darkorange\")\n\n con_cubic = plt.bar(x + 1 * width - 1.2,\n index_cubic,\n width=width,\n label='Against CUBIC',\n alpha=0.5,\n color=\"lawngreen\")\n\n con_itself = plt.bar(x + 2 * width - 1.2,\n index_itself,\n width=width,\n label='Against Another Same CCA',\n alpha=0.5,\n color=\"dodgerblue\")\n\n # Index\n plt.xticks(x + 1.5 * width - 1.2, [\"BBR\", \"Scalable\", \"BIC\", \"High Speed\",\n \"H-TCP\", \"Hybla\", \"Illinois\", \"Vegas\",\n \"YeAH\"],\n fontsize=fsize,\n rotation=\"45\")\n plt.ylabel(\"Jain`s Fairness Index\", fontsize=fsize)\n plt.yticks(fontsize=fsize)\n plt.ylim(0.5, 1.1)\n\n ax = plt.subplot(111)\n ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n ncol=3, mode=\"expand\", borderaxespad=0., fontsize=fsize)\n\n plt.subplots_adjust(left=0.07, right=0.98, top=0.9, bottom=0.2)\n\n plt.show()", "def main():\n try:\n arguments = docopt(__doc__)\n house = arguments['--house']\n character = arguments['--character']\n book = arguments['--books']\n if house:\n get_house(house)\n if character:\n get_character(character)\n if book:\n get_book(book)\n\n except DocoptExit as e:\n print e.message", "def main(self):\n\n argprs = argvparse.Argparse()\n\n lines = []\n\n if not argprs.files:\n self.read_user_input()\n lines.append(self.commandline)\n self.send_lines_to_finditer(argprs.regex, lines,\n argprs.underscore, argprs.color, argprs.machine)\n else:\n # print argprs.files\n for fl in argprs.files:\n try:\n filerd = fileread.Fileread(fl)\n self.send_lines_to_finditer(argprs.regex, filerd.lines,\n argprs.underscore, argprs.color, argprs.machine,\n filerd.shortfilename)\n except Exception as e:\n print str(e), \"\\n\"", "def main():\n parser = argparse.ArgumentParser(description='investigate code health and random statistics')\n sub_parsers = parser.add_subparsers(dest='command_name', title='Commands', help='', metavar='<command>')\n\n sub = sub_parsers.add_parser('line-count', help='list line counts')\n sub.add_argument('files', nargs='+', help='files or folders to look in')\n sub.add_argument('--each', type=int, default=1)\n sub.add_argument('--show', action='store_true')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_line_count)\n\n sub = sub_parsers.add_parser('include-list', help='list headers from files')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--print', 
dest='print_files', action='store_true')\n sub.add_argument('--print-stats', dest='print_stats', action='store_true')\n sub.add_argument('--print-max', dest='print_max', action='store_true')\n sub.add_argument('--no-list', dest='print_list', action='store_false')\n sub.add_argument('--count', default=2, type=int, help=\"only print includes that are more or equal to <count>\")\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.set_defaults(func=handle_list)\n\n sub = sub_parsers.add_parser('include-gv', help='generate a graphviz of the includes')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.add_argument('--group', action='store_true', help=\"group output\")\n sub.add_argument('--cluster', action='store_true', help=\"group output into clusters\")\n sub.set_defaults(func=handle_gv)\n\n sub = sub_parsers.add_parser('list-indents', help='list the files with the maximum indents')\n sub.add_argument('files', nargs='+')\n sub.add_argument('--each', type=int, default=1, help='group counts')\n sub.add_argument('--show', action='store_true', help='include files in list')\n sub.add_argument('--hist', action='store_true', help='show simple histogram')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_list_indents)\n\n sub = sub_parsers.add_parser('missing-pragma-once', help='find headers with missing include guards')\n sub.add_argument('files', nargs='+')\n sub.set_defaults(func=handle_missing_include_guards)\n\n sub = sub_parsers.add_parser('missing-in-cmake', help='find files that existis on disk but missing in cmake')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_missing_in_cmake)\n\n sub = sub_parsers.add_parser('list-no-project-folders', help='find projects that have not set the solution folder')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_list_no_project_folder)\n\n sub = sub_parsers.add_parser('check-files', help=\"find files that doesn't match the name style\")\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_check_files)\n\n args = parser.parse_args()\n if args.command_name is not None:\n args.func(args)\n else:\n parser.print_help()", "def main(argv):\n\n output_filename = ''\n input_filename = ''\n langCode = 'en'\n language = False\n\n # add support for default (en) language\n language = gettext.translation(\n 'webperf-core', localedir='locales', languages=[langCode])\n language.install()\n _ = language.gettext\n\n try:\n opts, args = getopt.getopt(\n argv, \"hi:o:\", [\"help\", \"input=\", \"output=\"])\n except getopt.GetoptError:\n print(main.__doc__)\n sys.exit(2)\n\n if (opts.__len__() == 0):\n print(main.__doc__)\n sys.exit(2)\n\n for opt, arg in opts:\n if opt in ('-h', '--help'): # help\n print(main.__doc__)\n sys.exit(2)\n elif opt in (\"-i\", \"--input\"): # input file path\n input_filename = arg\n\n file_ending = \"\"\n file_long_ending = \"\"\n if (len(input_filename) > 4):\n file_ending = input_filename[-4:].lower()\n if (len(input_filename) > 7):\n file_long_ending = input_filename[-7:].lower()\n\n if file_long_ending == \".sqlite\":\n from engines.sqlite import read_sites, add_site, delete_site\n elif (file_ending == \".csv\"):\n from engines.csv import read_sites, add_site, delete_site\n elif (file_ending == 
\".xml\"): # https://example.com/sitemap.xml\n from engines.sitemap import read_sites, add_site, delete_site\n else:\n from engines.json import read_tests, read_sites, add_site, delete_site\n pass\n elif opt in (\"-o\", \"--output\"): # output file path\n output_filename = arg\n pass\n\n tests = read_tests(input_filename, 0, -1)\n generated_date = False\n co2s = list()\n\n for test in tests:\n if not generated_date:\n generated_date = datetime.fromisoformat(\n test[FIELD_INDEX_DATE]).strftime('%Y-%m-%d')\n\n str_data = test[FIELD_INDEX_DATA].replace('\\'', '\"')\n data = json.loads(str_data)\n print(str_data)\n co2s.append(data['co2'])\n\n if not generated_date:\n generated_date = datetime.today().strftime('%Y-%m-%d')\n\n output_content = \"# This array was last generated with carbon-rating.py on {0}\\n\".format(\n generated_date)\n output_content += \"def get_generated_date():\\n\"\n output_content += \"\\treturn '{0}'\\n\".format(\n generated_date)\n output_content += \"\\n\"\n output_content += \"def get_percentiles():\\n\"\n output_content += \"\\treturn [\\n\"\n\n co2s_sorted = sorted(co2s)\n\n intervals = list()\n\n index = 1\n while (index <= 100):\n percentile = getPercentile(co2s_sorted, index)\n intervals.append(percentile)\n position = index - 1\n if index < 100:\n if position % 10 == 0 and position != 0:\n output_content += \"\\t\\t# {0} percentile\\n\".format(position)\n\n output_content += \"\\t\\t{0},\\n\".format(percentile)\n else:\n output_content += \"\\t\\t{0}\\n\".format(percentile)\n index += 1\n\n output_content += \"\\t]\"\n\n print(output_content)\n if (len(output_filename) > 0):\n write(output_filename, output_content)", "def main():\n\n start_program()\n yes_syn_words, no_syn_words, stop_words, record, mp3_filename, text, device_index, output_file = \\\n process_parameter_set()\n stand_alone_flag = process_check_input_argument()\n process_speak_listen(device_index, mp3_filename, text, record, flag=1)\n text = process_name(device_index, mp3_filename, record)\n input_details = process_speak_listen(device_index, mp3_filename, text, record, flag=0)\n response = process_input_details(device_index, input_details, mp3_filename, record, yes_syn_words, no_syn_words,\n stop_words)\n process_output_file_write(output_file, response)\n process_delete_mp3_output_files(stand_alone_flag)\n exit_program()", "def input_info(path_input):\n #path for your data directory, path for your data save, and names for the lists\n #Import with yaml file: input path and prefix information for files\n input_file = glob.glob('ExoTRed_input.yaml')\n if input_file: #if exist the input file, the code will obtain all information We need to run all tasks\n if len(input_file) == 1: #if there is only one yaml file, obtain data and save paths, and return that with a dictionary with information\n print 'reading input file ... \\n'\n file = yaml.load(open(input_file[0])) #creating our dictionary of input variables\n data_path = file['data_path']\n save_path = file['save_path']\n print '.... done! \\n'\n if len(input_file) > 1: #if are more than one yaml file,. the code will ask to you remove the others.\n print 'reading input file ... \\n'\n print '.... there is more than 1 input_path*.yaml.\\n \\nPlease, remove the others files that you do not need. \\n'\n raise SystemExit\n else:\n #if aren't a yaml file, the code ask for you to put a valid yaml file path.\n print 'There is no input_path*.yaml. 
\\nPlease, create a input file describe in INPUT_PARAMETERS.'\n raise SystemExit\n input_file = file #creating a better name to our dictionary info\n return data_path, save_path, input_file", "def main():\n argument_parser = argparse.ArgumentParser(add_help=True)\n argument_parser.add_argument(\"directory\", type=str,\n help=\"Directory to detect test smells.\")\n args = argument_parser.parse_args()\n \n if len(sys.argv) < 1:\n \n argument_parser.print_help()\n \n else:\n \n if os.path.exists(args.directory) or os.path.isdir(args.directory):\n\n #Stage 1: project level rule checking\n files = python_parser.get_python_files(os.path.abspath(args.directory))\n results_list = project_rule_runner(files)\n \n #Stage 2: test case level rule checking\n #test_case_pairs_list is a list of test cases paired with their file of origin\n filtered_files = python_parser.filter_python_files(files)\n test_case_pairs_list = python_parser.get_test_case_asts(filtered_files)\n \n for test_case_pair in test_case_pairs_list:\n results_list = results_list + test_case_rule_runner(test_case_pair)\n \n #Stage 3: test method level rule checking\n test_method_list = list()\n \n for test_case_pair in test_case_pairs_list:\n test_method_list = test_method_list + python_parser.get_test_asts(test_case_pair)\n \n for test_method in test_method_list: \n results_list = results_list + test_method_rule_runner(test_method)\n \n #Output formatting\n format_output(results_list)\n \n else:\n print(\"Invalid path given.\")", "def main():\n header = \"\"\"\n###############################################################################\n# #\n# Obtain data for the lookup execution time estimator #\n# #\n# --------------------------------------------------------------------------- #\n# #\n# Import execution times to mongodb from #\n# 1. mongodb_log via recorded blackboard skiller calls #\n# 2. 
samples of a mixed gaussian distribution #\n# #\n###############################################################################\n\"\"\"\n parser = argparse.ArgumentParser(\n description=textwrap.dedent(header),\n formatter_class=argparse.RawTextHelpFormatter,\n )\n common = argparse.ArgumentParser(add_help=False)\n group = common.add_argument_group(\"Mongodb options\")\n group.add_argument(\n \"--mongodb-uri\",\n type=str,\n help=\"The MongoDB URI of the execution time estimator lookup database (default: %(default)s)\",\n default=\"mongodb://localhost:27017/\",\n )\n group.add_argument(\n \"--db\",\n type=str,\n help=textwrap.dedent(\"\"\"name of the lookup database (default: %(default)s)\"\"\"),\n default=\"skills\",\n )\n group.add_argument(\n \"--dry-run\",\n \"-d\",\n action=\"store_true\",\n help=\"only create samples without uploading them to mongodb\",\n )\n group.add_argument(\n \"--collection\",\n \"-c\",\n type=str,\n help=\"name of the lookup collection (default: %(default)s)\",\n default=\"exec_times\",\n )\n group.add_argument(\n \"--drop-collection-first\",\n \"-dc\",\n action=\"store_true\",\n help=\"clear all old data from the collection\",\n )\n subparsers = parser.add_subparsers(\n help=\"Source of the execution time data\", dest=\"subparser\"\n )\n bb_parser = subparsers.add_parser(\n \"bblog\",\n parents=[common],\n description=textwrap.dedent(\n header\n + \"\"\"\\\n# #\n# Selected option 1 #\n# #\n###############################################################################\n\"\"\"\n ),\n formatter_class=argparse.RawTextHelpFormatter,\n )\n bb_parser.set_defaults()\n random_parser = subparsers.add_parser(\n \"generate\",\n parents=[common],\n description=textwrap.dedent(\n header\n + \"\"\"\\\n# #\n# Selected option 2 #\n# #\n###############################################################################\n\"\"\"\n ),\n formatter_class=argparse.RawTextHelpFormatter,\n )\n random_parser.set_defaults()\n bb_sanity = bb_parser.add_argument_group(\"Sanity checks to avoid faulty entries\")\n bb_sanity.add_argument(\n \"--lower-bound\",\n \"-l\",\n type=float,\n default=0,\n help=\"ignore entries with duration smaller than this\",\n )\n bb_sanity.add_argument(\n \"--upper-bound\",\n \"-u\",\n type=float,\n default=float(\"inf\"),\n help=\"ignore entries with duration smaller than this\",\n )\n bb_log = bb_parser.add_argument_group(\"Blackboard log information\")\n bb_log.add_argument(\n \"--src-uri\",\n type=str,\n help=\"The MongoDB URI of the blackboard log connection (default: %(default)s)\",\n default=\"mongodb://localhost:27017/\",\n )\n bb_log.add_argument(\n \"--src-db\",\n type=str,\n help=\"The name of the blackboard log database (default: %(default)s)\",\n default=\"fflog\",\n )\n bb_log.add_argument(\n \"--src-col\",\n type=str,\n help=\"The name of the blackboard log collection (default: %(default)s)\",\n default=\"SkillerInterface.Skiller\",\n )\n bb_log.add_argument(\n \"--drop-src-col\",\n type=bool,\n help=\"Delete the skiller blackboard log collection afterwards\",\n default=False,\n )\n\n skill = random_parser.add_argument_group(\"Skill information\")\n skill.add_argument(\n \"--quantity\",\n \"-n\",\n type=int,\n help=\"number of entries to generate\",\n required=True,\n )\n skill.add_argument(\n \"--skill-name\",\n \"-s\",\n type=str,\n help=\"skill name to generate entries for\",\n required=True,\n )\n skill.add_argument(\n \"--skill-args\",\n \"-a\",\n type=str,\n nargs=\"+\",\n action=\"append\",\n help=textwrap.dedent(\n \"\"\"skill arguments. 
usage -a <arg-name> <val1> <val2> ...\n where val<i> are the possible values of the argument that will be chosen from at random\n * (placeholder value) if no values are given\n \"\"\"\n ),\n )\n\n gauss = random_parser.add_argument_group(\"Mixed gaussian distribution\")\n gauss.add_argument(\n \"--gauss-params\",\n \"-g\",\n type=float,\n help=\"mean and standard deviation (in that order) of a gaussian, repeat this option to add more gaussians\",\n nargs=2,\n required=True,\n action=\"append\",\n )\n gauss.add_argument(\n \"--dist-weights\",\n \"-w\",\n type=float,\n default=[],\n help=\"Weight of each gauss distribution (default 1)\",\n nargs=\"+\",\n )\n gauss.add_argument(\n \"--lower-bound\",\n \"-l\",\n type=float,\n default=0,\n help=\"clip distribution to a lower bound\",\n )\n gauss.add_argument(\n \"--upper-bound\",\n \"-u\",\n type=float,\n default=float(\"inf\"),\n help=\"clip distribution to an upper bound\",\n )\n\n visual = random_parser.add_argument_group(\"Visualization options\")\n visual.add_argument(\n \"--bin-size\",\n \"-b\",\n type=int,\n help=\"number of bins to display sampled durations (default: %(default)s)\",\n default=50,\n )\n visual.add_argument(\n \"--non-interactive\",\n \"-y\",\n action=\"store_true\",\n help=\"skip drawing the sample range\",\n )\n parser.epilog = (\n \"--- Arguments common to all sub-parsers ---\"\n + common.format_help().replace(common.format_usage(), \"\")\n )\n random_parser.epilog = \"\"\"\nexample call: ./mongodb_skillsim_lookup.py generate -d -n \\\n1000 -g 10 2 -g 20 3 -w 1 5 -s test -a arg1 value1 value2 -a arg2\n \"\"\"\n args = parser.parse_args(args=None if sys.argv[1:] else [\"--help\"])\n # validate inputs\n if args == None:\n parser.exit(1)\n\n mongoIf = MongoInterface(args.mongodb_uri, args.db, args.collection, args.dry_run)\n if args.drop_collection_first and not args.dry_run:\n print(\"Drop collection before uploading...\")\n drop_collection(args.mongodb_uri, args.db, args.collection)\n if args.subparser == \"bblog\":\n mongoIf.transform(\n args.src_uri, args.src_db, args.src_col, args.lower_bound, args.upper_bound\n )\n if args.drop_src_col:\n drop_collection(args.src_mongodb_uri, args.src_db, args.src_col)\n elif args.subparser == \"generate\":\n sampler = GaussSampler(\n args.quantity,\n args.dist_weights,\n args.gauss_params,\n args.upper_bound,\n args.lower_bound,\n )\n if not args.non_interactive:\n sampler.display(args.bin_size)\n mongoIf.upload(sampler.samples, args.skill_name, args.skill_args)\n else:\n print(\"unrecognized mode\")", "def main():\r\n parser = get_parser()\r\n config = parser.parse_args(['--cfg', 'config.yaml'])\r\n result_filing.init_config_vars(config)\r\n run_id = config.info.run_id\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n\r\n operation = config.info.operation_type\r\n logger.info(\"Selected operation type %s.\"%(operation))\r\n if operation == const.TRAIN_OP:\r\n train.train_model(config)\r\n elif operation == const.DEPLOY_OP:\r\n test.test_model(config)", "def main():\n\n parser = argparse.ArgumentParser(description='Duolingo shared task baseline model')\n parser.add_argument('--train', help='Training file name', required=True)\n parser.add_argument('--test', help='Test file name, to make predictions on', required=True)\n parser.add_argument('--pred', help='Output file name for predictions, defaults to test_name.pred')\n args = parser.parse_args()\n\n if not args.pred:\n args.pred = args.test + '.pred'\n\n assert os.path.isfile(args.train)\n assert 
os.path.isfile(args.test)\n\n # Assert that the train course matches the test course\n assert os.path.basename(args.train)[:5] == os.path.basename(args.test)[:5]\n\n training_data, training_labels = load_data(args.train)\n test_data = load_data(args.test)\n\n ####################################################################################\n # Here is the delineation between loading the data and running the baseline model. #\n # Replace the code between this and the next comment block with your own. #\n ####################################################################################\n\n\n vectorizer = DictVectorizer()\n X_train = [instance_data.to_features() for instance_data in training_data]\n Y_train = [training_labels[instance_data.instance_id] for instance_data in training_data]\n ids_train = [instance_data.instance_id for instance_data in training_data]\n\n X_test = [instance_data.to_features() for instance_data in test_data]\n ids_test = [instance_data.instance_id for instance_data in test_data]\n\n\n X_train = vectorizer.fit_transform(X_train)\n clf = LogisticRegression()\n clf.fit(X_train,Y_train)\n\n X_test = vectorizer.transform(X_test)\n preds_scores = [x[0] for x in clf.predict_proba(X_test)]\n predictions = dict([(instance_id,pred_score) for instance_id,pred_score in zip(ids_test,preds_scores)])\n\n ####################################################################################\n # This ends the baseline model code; now we just write predictions. #\n ####################################################################################\n\n with open(args.pred, 'wt') as f:\n for instance_id, prediction in iteritems(predictions):\n f.write(instance_id + ' ' + str(prediction) + '\\n')", "def main():\n now = time.strftime('%Y%m%d%H%M%S')\n\n # info = get_info(now)\n # info_filename = 'info_' + now + '.csv'\n # info.to_csv(os.path.join('..', '..', 'data', 'raw', info_filename), index=False)\n\n questions = get_questions(now)\n\n # don't talk about all this detail in the talk", "def main():\n actual_dir = os.getcwd()\n i18n_dir = os.path.join(actual_dir, 'i18n') # Directory of I18n app.\n i18n_dirname = os.path.basename(i18n_dir)\n models_file = os.path.join(i18n_dir, 'models.py')\n data_dir = os.path.join(i18n_dir, 'data') # CSV files.\n data_license = os.path.join(data_dir, 'LICENSE_CC')\n project_dir = os.path.dirname(i18n_dir)\n settings_file = os.path.join(project_dir, 'settings.py')\n\n show_license(data_license)\n i18n_model = setup_environ(project_dir, i18n_dirname, settings_file)\n models = get_data_models(models_file)\n new_models = sort_models(data_dir, models)\n for model in new_models:\n load_data(model, i18n_model, i18n_dirname)", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def main():\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n get_user_config()", "def main():\n\targs = getArgs()\n\tid_question = args.id_question\n\tlang = args.language\n\tdir_cp = None\n\twith open('config.json') as json_file:\n\t\tconfig_data = json.load(json_file)\n\t\tdir_cp = config_data['dir_cp']\n\n\t\n\t\n\t# sample_io = GetData(args.id_question).get_uri_io_sample()\n\ttemplate = FileUtil(id_question, dir_cp['path'], lang)\n\ttemplate.write_template()\n\t# print(sample_io)", "def main():\n\n args = 
parse_args()\n\n # Read frame.\n insertion_df = Insertion.from_csv(args.insertions, sep='\\t', as_frame=True)\n\n # Create output directory if it doesn't exist.\n args.output_dir.mkdir(exist_ok=True, parents=True)\n\n if args.samples is not None:\n # Subset for samples and convert to categorical.\n mask = insertion_df['sample'].isin(args.samples)\n\n insertion_df = insertion_df.loc[mask]\n insertion_df['sample'] = pd.Categorical(\n insertion_df['sample'], categories=args.samples)\n\n # Split and write individual outputs.\n for sample, grp in insertion_df.groupby('sample'):\n if args.remove_prefix:\n grp['id'] = grp['id'].str.replace(sample + '.', '')\n\n if len(grp) == 0:\n print('WARNING: no insertions found for sample {}'.format(sample))\n\n sample_path = args.output_dir / '{}.txt'.format(sample)\n grp.to_csv(str(sample_path), sep='\\t', index=False)", "def main():\r\n run_processes('tests.csv', 'labs.csv')", "def help():\n description = \"\"\"\n Preprocess the raw reads of FASTQ files of Trac-looping to reference\n geneome with bowtie2 and obtain the unqiue PETs with quality control\n results.\n Fastqs files should be named with suffix pattern as \n _R1.fastq.gz, _R2.fastq.gz.\n\n Example:\n tracPre.py -fqd ../1.fq -o ./ -ref ../bowtie2/hg38 -n 10 -p 5 -mapq 10\n \"\"\"\n parser = argparse.ArgumentParser(description=description,\n formatter_class=RawTextHelpFormatter)\n parser.add_argument(\n \"-fqd\",\n dest=\"fqd\",\n required=True,\n type=str,\n help=\"The directory for raw .fastq.gz files, for example ../1.fastq/ \"\n )\n parser.add_argument(\n \"-o\",\n dest=\"output\",\n required=False,\n type=str,\n default=\"./\",\n help=\n \"Output directory, default is ./, if directory not exists, create one.\"\n )\n parser.add_argument(\n \"-ref\",\n dest=\"ref\",\n required=True,\n type=str,\n help=\n \"Bowtie2 reference index prefix, such as ./ref/hg38, generated from\\n\"\\\n \"bowtie2-build hg38.fa hg38.\"\n )\n parser.add_argument(\n \"-n\",\n dest=\"number\",\n required=False,\n type=int,\n default=1,\n help=\"How many Bowtie2 to run at the same time, default is 1. \")\n parser.add_argument(\n \"-p\",\n dest=\"cpu\",\n required=False,\n type=int,\n default=5,\n help=\"How many cpus used by each Bowtie2 or following processing,\\n\"\\\n \"default is 5. 
\"\n )\n parser.add_argument(\n \"-mapq\",\n dest=\"mapq\",\n required=False,\n default=10,\n type=int,\n help=\"MAPQ cutoffs for filtering PETs, default is 10.\"\n )\n op = parser.parse_args()\n return op", "def main():\n\n # this will analyze all files in the input_files directory\n for folder in [x for x in os.listdir(os.path.join(os.getcwd(), 'test_directory')) if os.path.isdir(os.path.join(os.getcwd(), 'test_directory', x))]:\n try:\n print(f'Creating GED_Repo for files in {folder}')\n g = GED_Repo([os.path.join(os.getcwd(), 'test_directory', folder, f) for f in os.listdir(os.path.join(os.getcwd(), 'test_directory', folder)) if f.endswith('.ged')])\n g.check_data()\n g.print_data()\n g.print_individuals()\n g.print_families()\n except ValueError as v:\n print(v)\n except FileNotFoundError as f:\n print(f)", "def main(self):\n self.parse_command_line()\n\n if self.args.unshuffle:\n suffix = \"-unshuffled\"\n else:\n suffix = \"-shuffled\"\n\n if self.args.input is None:\n self.guess_input(suffix)\n\n if self.args.output is None:\n inpath = self.args.input\n self.args.output = inpath.with_stem(f\"{inpath.stem}{suffix}\")\n\n print(f\"{self.args.input} -> {self.args.output}\")\n\n indoc = fitz.Document(self.args.input)\n n_pages = indoc.page_count\n if n_pages < 2:\n print(f\"Not enough pages ({n_pages}) in document\")\n return\n\n if self.args.pages or self.args.unshuffle:\n chaps = pd.DataFrame.from_records(\n [\n {\n \"first_1\": n_page,\n \"title\": self.get_title(n_page, page),\n \"has_text\": self.has_text(page),\n }\n for n_page, page in enumerate(indoc, 1)\n ]\n )\n if not self.args.keep_empty_pages:\n chaps = chaps.query(\"has_text\")\n else:\n # In most PyMuPDF APIs, page numbers are 0-based,\n # but in the toc they're 1-based\n toc = [\n {\"first_1\": entry[2], \"title\": entry[1]}\n for entry in indoc.get_toc(simple=False)\n if entry[3].get(\"to\", TOP_OF_PAGE) == TOP_OF_PAGE\n ]\n if len(toc) < 1:\n print(\"No relevant bookmarks in document\")\n return\n\n toc.append({\"first_1\": n_pages + 1, \"title\": \"END\"})\n chaps = pd.DataFrame.from_records(toc, index=\"first_1\")\n if 1 not in chaps.index:\n chaps.loc[1, \"title\"] = f\"{self.args.input.name}:1\"\n\n chaps = chaps.sort_index().reset_index()\n\n if (n_chaps := len(chaps)) < 2:\n print(f\"Not enough bookmarks in {self.args.input}\")\n return\n\n first_idx = self.num_to_idx(self.args.first_chapter, n_chaps)\n last_idx = self.num_to_idx(self.args.last_chapter, n_chaps)\n\n if not 0 <= first_idx <= last_idx < n_chaps:\n print(f\"Document has {n_chaps} chapters, doens't fit\")\n return\n\n chaps = chaps.iloc[first_idx:last_idx].copy()\n\n chaps[\"next_1\"] = chaps.first_1.shift(-1, fill_value=n_pages + 1)\n chaps[\"length\"] = chaps.next_1 - chaps.first_1\n chaps[\"first_0\"] = chaps.first_1 - 1\n chaps[\"last_0\"] = chaps.next_1 - 2\n\n chaps = chaps.loc[chaps.length >= self.args.min]\n if self.args.max:\n chaps = chaps.loc[chaps.length <= self.args.max]\n\n if self.args.no_shuffle:\n pass # Leave as-is\n elif self.args.reverse:\n chaps = chaps[::-1]\n elif self.args.unshuffle:\n chaps.sort_values(\"title\", inplace=True)\n else:\n rng = self.randomize()\n chaps = chaps.sample(frac=1, random_state=rng.bit_generator)\n\n outdoc = fitz.Document()\n outtoc = []\n\n for _, chap in chaps.iterrows():\n outtoc.append((1, chap.title, outdoc.page_count + 1))\n outdoc.insert_pdf(indoc, chap.first_0, chap.last_0, final=False)\n if not self.args.unshuffle:\n outdoc.set_toc(outtoc)\n\n if not self.args.keep_empty_pages:\n for pno in 
range(outdoc.page_count - 1, 0, -1):\n if not self.has_text(outdoc[pno]):\n outdoc.delete_page(pno)\n\n if outdoc.page_count < indoc.page_count:\n print(f\"Number of pages in {self.args.input}: {indoc.page_count}\")\n print(f\"Number of pages in {self.args.output}: {outdoc.page_count}\")\n outdoc.ez_save(self.args.output)", "def main():\n parser = ArgumentParser(usage='%(prog)s [options] ecommonsMetadata.csv')\n parser.add_argument(\"-d\", \"--date\", dest=\"date\",\n help=\"Date on or after that an ETD was published for \\\n creating DOIs. Put in format YYYY-MM\")\n parser.add_argument(\"datafile\", help=\"eCommons metadata worked from.\")\n\n args = parser.parse_args()\n\n if not len(sys.argv) > 0:\n parser.print_help()\n parser.exit()\n\n workingdir = csvparse(args.datafile, args.date)\n doiparse(workingdir)\n print('ANVL files available in: ' + workingdir)", "def main(unused_argv):\n make_dir(FLAGS.raw_dir)\n\n # Get paths of download/extracted training and evaluation files.\n print(\"Downloading data from source\")\n train_files = get_raw_files(FLAGS.raw_dir, constants.TRAIN_DATA_SOURCES)\n eval_files = get_raw_files(FLAGS.raw_dir, constants.EVAL_DATA_SOURCES)", "def main():\n\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')", "def run(self):\n # FILE INPUT\n if self.text_type == \"file\":\n self.process_files()\n\n # STRING INPUT\n else:\n self.process_strings()\n\n if self.json:\n self.save_json()\n\n if self.errors:\n print(\"\\nThe following file(s) could not be opened:\")\n for error in self.errors:\n print(f\"\\t{error}\")", "def main():\n processSetOfCerFiles(sys.argv[1:])", "def __main__():\n parser = argparse.ArgumentParser(description='basic output parser', usage='%(prog)s -i input.xml -o output.csv')\n parser.add_argument('--input', '-i', dest='infile', help='file to input xml from')\n parser.add_argument('--output', '-o', dest='outfile', default='output.csv', help='file to output csv to')\n parser.add_argument('--version', '-v', action='version', version='%(prog)s 0.1')\n args = parser.parse_args()\n axmlfile = args.infile\n acsvfile = args.outfile\n\n if not args.infile:\n sys.exit(parser.print_help())\n\n dosomeworkslacker(axmlfile, acsvfile)", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n conn = sqlite3.connect('../raw/td_V2.db')\n git_commits = pd.read_sql_query(\"SELECT * FROM GIT_COMMITS\",conn)\n szz_fault_inducing_commits = pd.read_sql_query(\"SELECT * FROM szz_fault_inducing_commits\",conn)\n refactoring_miner = pd.read_sql_query(\"SELECT * FROM refactoring_miner\",conn)\n refactoring_miner = refactoring_miner[refactoring_miner[\"COMMIT_HASH\"].isin(git_commits[\"COMMIT_HASH\"])]\n git_commits_changes = pd.read_sql_query(\"SELECT * FROM GIT_COMMITS_CHANGES\", conn)\n git_commits_changes = git_commits_changes[git_commits_changes[\"COMMIT_HASH\"].isin(refactoring_miner[\"COMMIT_HASH\"])]\n\n preprocess(git_commits, szz_fault_inducing_commits, refactoring_miner, git_commits_changes)", "def main():\n\n config = None\n\n try:\n args = get_args()\n config = process_config(args.config)\n raise RuntimeError(\"Missing or invalid arguments\")\n except Exception as e:\n logging.error(\"Failed\", exc_info=e)\n\n print(\"Create the data generator.\")\n # data_loader = MnistDataLoader(config=config)\n data_loader = IrisDataLoader(config=config)\n train_data = data_loader.get_train_data()\n test_data = data_loader.get_test_data()\n\n print(\"Build the model\")\n # cnn_model = ConvModel(config=config).build_model()\n cnn_model = ANNModel(config=config).build_model()\n\n print(\"Load the best weights\")\n cnn_model.load_weights(\"experiments/{}/{}/checkpoints/{}-weights.best.hdf5\".format(\n config.evaluation.date, config.exp.name, config.exp.name))\n\n print(\"Evaluate the model\")\n print(\"Training Metrics\")\n evaluate(model=cnn_model, data=train_data)\n print(\"Testing Metrics\")\n evaluate(model=cnn_model, data=test_data)\n\n # print(\"Visualize loss and accuracy for Training and Validation data\")\n # plot_history(config=config)\n\n # print(\"Plotting ROC Curve\")\n # plot_roc(model=cnn_model, data=test_data)\n\n print(\"Classifcation Accuracy Report\")\n classification_accuracy_report(model=cnn_model, data=test_data)", "def main():\n\n file_list = []\n # this will analyze all files in the input_files directory\n for folder in [x for x in os.listdir(os.path.join(os.getcwd(), 'test_directory')) if os.path.isdir(os.path.join(os.getcwd(), 'test_directory', x))]:\n try:\n # print(f'Reading files in {folder}')\n file_list = file_list + [os.path.join(os.getcwd(), 'test_directory', folder, f) for 
f in os.listdir(os.path.join(os.getcwd(), 'test_directory', folder)) if f.endswith('.ged')]\n except ValueError as v:\n print(v)\n except FileNotFoundError as f:\n print(f)\n\n try:\n print(f'Analyzing final cumulative file data.')\n # print(file_list)\n g = GED_Repo(file_list)\n g.check_data()\n g.print_data()\n g.print_individuals()\n g.print_families()\n except ValueError as v:\n print(v)\n except FileNotFoundError as f:\n print(f)", "def __main__():\n\n args = parse_command_line(sys.argv)\n\n identifiers = []\n if args.input and args.column:\n [\n identifiers.append(line.split(\"\\t\")[args.column - 1].strip())\n for line in open(args.input, \"r\")\n ]\n elif args.text:\n identifiers = [a.strip() for a in args.text.split() if a.strip()]\n\n fetch_fasta(identifiers, args)", "def main():\r\n if len(sys.argv)==4:\r\n\r\n # files path\r\n m_file_path,c_file_path,database = sys.argv[1:]\r\n\r\n # first, read the data\r\n print('Reading the data...')\r\n df = read_data(m_file_path,c_file_path)\r\n print('OK!')\r\n print(' ')\r\n \r\n # clean it\r\n print('Cleaning the data...')\r\n df = clean_data(df)\r\n print('OK!')\r\n print(' ')\r\n \r\n # save it\r\n print('Saving data...')\r\n save_data(df,database)\r\n print(' ')\r\n \r\n # when it's done\r\n print(f'Cleaned data is stored in {database[:-3]} database') \r\n\r\n else:\r\n print('Please provide the filepaths of the messages and categories '\\\r\n 'datasets as the first and second argument respectively, as '\\\r\n 'well as the filepath of the database to save the cleaned data '\\\r\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\r\n 'disaster_messages.csv disaster_categories.csv '\\\r\n 'DisasterResponse.db')", "def main():\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Reads config file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n print('\\n\\nInterpreting command line options\\n'+'~'*72+'\\n')\n\n parser = ArgumentParser()\n subparser = parser.add_subparsers(\\\n help='run_selafin commands to do', dest='command')\n\n subparser = chop_parser(subparser)\n subparser = scan_parser(subparser)\n subparser = spec_parser(subparser)\n subparser = alter_parser(subparser)\n subparser = merge_parser(subparser)\n subparser = diff_parser(subparser)\n subparser = calcs_parser(subparser, 'calcs', '???')\n subparser = calcs_parser(subparser, 'crunch', '???')\n subparser = calcs_parser(subparser, 'transf', '???')\n subparser = sample_parser(subparser)\n subparser = subdivide_parser(subparser)\n subparser = tesselate_parser(subparser)\n\n options = parser.parse_args()\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Reads code name ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n if options.command == 'scan':\n scan(options)\n elif options.command == 'spec':\n spec(options)\n elif options.command == 'chop':\n chop(options)\n elif options.command == 'alter':\n alter(options)\n elif options.command == 'merge':\n merge(options)\n elif options.command == 'diff':\n diff(options)\n elif options.command == 'sample':\n sample(options)\n elif options.command in ['calcs', 'crunch', 'transf']:\n calcs(options, options.command)\n elif options.command == 'subdivide':\n subdivide(options)\n elif options.command == 'tessellate':\n tesselate(options)\n else:\n raise TelemacException(\\\n '\\nDo not know what to do with '\n 'this code name: {}'.format(options.command))\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Jenkins' success message 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n print('\\n\\nMy work is done\\n\\n')\n\n sys.exit(0)", "def main(): \n # Parse Arguments\n args = parse_arguments()\n\n # Print outdir\n print(\"Writing output to \" + args.outdir)\n\n # Print start statement\n print('Starting script for ' + args.file + ' at ' + str(datetime.datetime.now()), flush=True)\n\n # Put all the files in a function that will further handle the files as dataframe\n create_df(args.file, args.outdir)\n\n # Script is finished\n print('All done for ' + args.file + ' at ' + str(datetime.datetime.now()), flush=True)", "def main(argv: List[str] = None) -> None:\n arg_parser = ArgumentParser()\n arg_subparsers = arg_parser.add_subparsers(dest=\"command\")\n\n arg_parser_compile = arg_subparsers.add_parser(\n \"compile\", help=\"Compile a TOML file to a tabbed TXT file\"\n )\n arg_parser_compile.add_argument(\n \"source_target\",\n help=\"Pair of source file (TOML) and target file (TXT)\",\n metavar=\"source target\",\n nargs=\"+\",\n )\n\n arg_parser_decompile = arg_subparsers.add_parser(\n \"decompile\", help=\"Decompile a tabbed TXT file to a TOML file\"\n )\n arg_parser_decompile.add_argument(\n \"source_target\",\n help=\"Pair of source file (TXT) and target file (TOML)\",\n metavar=\"source target\",\n nargs=\"+\",\n )\n\n args = arg_parser.parse_args(argv)\n\n if args.command is None:\n arg_parser.print_help()\n elif args.command == \"compile\":\n for source, target in grouper(args.source_target, 2):\n if not source:\n raise ValueError(f\"Invalid source file {source!r}\")\n if not target:\n raise ValueError(f\"Missing target for source {source!r}\")\n with open(source, encoding=\"utf-8\") as toml_file:\n d2txt_data = toml_to_d2txt(toml_file.read())\n d2txt_data.to_txt(target)\n elif args.command == \"decompile\":\n for source, target in grouper(args.source_target, 2):\n if not source:\n raise ValueError(f\"Invalid source file {source!r}\")\n if not target:\n raise ValueError(f\"Missing target for source {source!r}\")\n d2txt_file = D2TXT.load_txt(source)\n with open(target, mode=\"w\", encoding=\"utf-8\") as toml_file:\n toml_file.write(d2txt_to_toml(d2txt_file))\n else:\n raise ValueError(f\"Unexpected command: {args.command!r}\")", "def main():\n arguments = docopt(__doc__, version='cluster_parameter_extractor 1.0 BETA')\n\n input_file = arguments['--input']\n output_file = arguments[\"--output\"]\n process_synthetic = arguments[\"--synthetic_peptides\"]\n\n # make sure the input file exists\n if not os.path.isfile(input_file):\n print(\"Error: Cannot find input file '\" + input_file + \"'\")\n sys.exit(1)\n\n # make sure the output file does not exist\n if os.path.isfile(output_file):\n print(\"Error: Output file exists '\" + output_file + \"'\")\n sys.exit(1)\n\n with open(output_file, \"w\") as OUT:\n # write the header\n OUT.write(\"id\\tprecursor_mz\\tav_charge\\tsize\\tidentified_spec_count\\tunidentified_spec_count\\t\"\n \"max_ratio\\tmax_il_ratio\\tprecursor_mz_range\\tsequences\\t\"\n \"max_sequence\\tmax_sequence_count\\tmax_sequence_mods\\t\"\n \"second_max_sequence\\tsecond_max_sequence_count\\tsecond_max_sequence_mods\\tn_input_files\\t\"\n \"max_consensus_peak_rel_tic\\tmax_consensus_peak_mz\")\n\n if process_synthetic:\n OUT.write(\"\\tsynth_count\\tsynth_ratio\\tsynth_max_sequence\")\n\n OUT.write(\"\\n\")\n\n # process the file\n parser = clustering_parser.ClusteringParser(input_file)\n\n for cluster in parser:\n cluster_line = process_cluster(cluster)\n OUT.write(cluster_line)\n\n # process synthetic 
peptides\n if process_synthetic:\n synth_line = process_synthetic_peptides(cluster)\n OUT.write(\"\\t\" + synth_line)\n\n OUT.write(\"\\n\")\n\n print(\"Results written to \" + output_file)", "def _create_descriptions():\n\n # capture global variables requests and poo\n global _requests\n global _pool\n\n # extract cookies\n filenames = g.filenames\n processType = g.processType\n coreFactor = g.coreFactor\n \n # ensure no requests persisted from last time\n del _requests[:]\n\n # determine how program is going to be run\n if processType == 'parallel':\n \n # Multiply core factor by our number of cores\n cores = _get_num_processors()\n num_processes = int(float(coreFactor) * cores)\n \n _pool = Pool(processes=num_processes)\n current_app.logger.info(time.ctime() + \"\\tProcess pool with %s processes initialized for descriptions\" % num_processes)\n _requests = _pool.map(_get_description, filenames)\n \n else:\n for filename in filenames:\n _requests.append(_get_description(filename))\n\n # We create a pool based off these cookie, so no longer need it\n _remove_persist_storage(\"processType\")\n _remove_persist_storage(\"coreFactor\")", "def main():\n args = parse_args()\n process_args(args)", "def main():\n \n\n parser = argparse.ArgumentParser(description='MozartFlow: Observing the flow of music.')\n\n parser.add_argument('-k', '--knn', help='K in K-nearest neighbours algorithm', default=2)\n parser.add_argument('-ll', '--loglevel', help='Set the logging level', type=str, choices=['DEBUG','INFO','WARNING','ERROR','CRITICAL'])\n parser.add_argument('-p', '--path', help='Filepath of the audio file, need to be labeled', type=str, default='')\n \n args = parser.parse_args()\n \n logging.basicConfig(level=args.loglevel)\n\n model = Model(args.knn, args.loglevel)\n model.model()\n\n if args.path is not '':\n model.prediction(args.path)\n else:\n print('\\n[-.-] Ain\\'t you testing something! Well, that\\'s a shame. I learned just for you.')\n\n logger.info('\\n\\n-------/------- Created by ------/-------')\n for creator in model.read_yml['_creator']:\n logger.info('Lord {}'.format(creator))", "def main():\n # Read input from file, returns all objects\n objects = read_input()\n for obj in objects:\n try:\n # Generate the objects answer, yields new object\n obj = generate_answer(obj)\n except Exception:\n # If an error might occur that is not covered, catch it here! Continue where left off\n print('ERROR: An unrecoverable error occured during the processing of ' + obj.get(\n 'operation') + '. 
Continuing...')\n obj['answer'] = 'ERROR'\n\n print(obj) # TODO: Remove before production\n # Generate an output file\n print_output(objects)", "def main():\n (\n calibration_file,\n drs4_ped_file,\n time_calibration_file,\n systematic_correction_file,\n drive_log_file,\n run_summary_file,\n pedestal_ids_file,\n run_number,\n ) = data_sequence_cli_parsing()\n\n if options.verbose:\n log.setLevel(logging.DEBUG)\n else:\n log.setLevel(logging.INFO)\n\n # Run the routine piping all the analysis steps\n rc = data_sequence(\n calibration_file,\n drs4_ped_file,\n time_calibration_file,\n systematic_correction_file,\n drive_log_file,\n run_summary_file,\n pedestal_ids_file,\n run_number,\n )\n sys.exit(rc)", "def main(params):\n\n train = []\n test = []\n imdir = params['dest'] + '/{0}/COCO_{0}_{1:012d}.jpg'\n\n if params['v'] == 2:\n train_annotations_file = params['dir'] + '/v2_mscoco_train2014_annotations.json'\n val_annotations_file = params['dir'] + '/v2_mscoco_val2014_annotations.json'\n train_questions_file = params['dir'] + '/v2_OpenEnded_mscoco_train2014_questions.json'\n val_questions_file = params['dir'] + '/v2_OpenEnded_mscoco_val2014_questions.json'\n test_questions_file = params['dir'] + '/v2_Questions_Test_mscoco/v2_OpenEnded_mscoco_test2015_questions.json'\n else:\n train_annotations_file = params['dir'] + '/mscoco_train2014_annotations.json'\n val_annotations_file = params['dir'] + '/mscoco_val2014_annotations.json'\n train_questions_file = params['dir'] + '/OpenEnded_mscoco_train2014_questions.json'\n val_questions_file = params['dir'] + '/OpenEnded_mscoco_val2014_questions.json'\n test_questions_file = params['dir'] + '/Questions_Test_mscoco/v2_OpenEnded_mscoco_test2015_questions.json'\n\n if params['split'] == 1:\n\n print('Loading annotations and questions...')\n train_anno = json.load(open(train_annotations_file, 'r'))\n val_anno = json.load(open(val_annotations_file, 'r'))\n\n train_ques = json.load(open(train_questions_file, 'r'))\n val_ques = json.load(open(val_questions_file, 'r'))\n\n subtype = 'train2014'\n for i in range(len(train_anno['annotations'])):\n ans = train_anno['annotations'][i]['multiple_choice_answer']\n\n answer_dict = sum_over_occurences(train_anno['annotations'][i]['answers'])\n question_id = train_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, train_anno['annotations'][i]['image_id'])\n\n question = train_ques['questions'][i]['question']\n\n train.append(\n {'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans,\n 'answers': answer_dict})\n\n subtype = 'val2014'\n for i in range(len(val_anno['annotations'])):\n ans = val_anno['annotations'][i]['multiple_choice_answer']\n\n # A modification to count the number of occurences of each answer and then store\n # them in the json file as well\n answer_dict = sum_over_occurences(val_anno['annotations'][i]['answers'])\n\n question_id = val_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, val_anno['annotations'][i]['image_id'])\n\n question = val_ques['questions'][i]['question']\n\n test.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans,\n 'answers': answer_dict})\n else:\n print('Loading annotations and questions...')\n train_anno = json.load(open(train_annotations_file, 'r'))\n val_anno = json.load(open(val_annotations_file, 'r'))\n\n train_ques = json.load(open(train_questions_file, 'r'))\n val_ques = json.load(open(val_questions_file, 'r'))\n test_ques = json.load(open(test_questions_file, 
'r'))\n\n subtype = 'train2014'\n for i in range(len(train_anno['annotations'])):\n ans = train_anno['annotations'][i]['multiple_choice_answer']\n question_id = train_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, train_anno['annotations'][i]['image_id'])\n\n question = train_ques['questions'][i]['question']\n\n train.append(\n {'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans})\n\n subtype = 'val2014'\n for i in range(len(val_anno['annotations'])):\n ans = val_anno['annotations'][i]['multiple_choice_answer']\n question_id = val_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, val_anno['annotations'][i]['image_id'])\n\n question = val_ques['questions'][i]['question']\n\n train.append(\n {'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans})\n\n subtype = 'test2015'\n for i in range(len(test_ques['questions'])):\n print(test_ques.keys())\n ans = val_anno['annotations'][i]['multiple_choice_answer']\n question_id = test_ques['questions'][i]['question_id']\n image_path = imdir.format(subtype, test_ques['questions'][i]['image_id'])\n\n question = test_ques['questions'][i]['question']\n\n test.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans})\n\n print('Training sample %d, Testing sample %d...' % (len(train), len(test)))\n\n if v2:\n json.dump(train, open('data/vqa_raw_train.json', 'w'))\n json.dump(test, open('data/vqa_raw_test.json', 'w'))\n else:\n json.dump(train, open('data/VQAv1/vqa_raw_train.json', 'w'))\n json.dump(test, open('data/VQAv1/vqa_raw_test.json', 'w'))", "def main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')", "def main():\n # Parse the required args for processing\n parser = argparse.ArgumentParser(description='This is a direct report calculator made by Dean Hutton')\n parser.add_argument('-i', '--input', help='Input file name used to run direct reports on.', required=True)\n parser.add_argument('-rd', '--run_date', help='The date to display direct reports for.', required=True)\n args = parser.parse_args()\n\n input_file_location = args.input\n\n # Do error checking making sure run_date is valid date and that input file exists\n if not os.path.isfile(input_file_location):\n print('There has been an error locating the input file. Please make sure this file exists {}'.format(args.input))\n sys.exit()\n\n try:\n run_date = datetime.strptime(args.run_date, '%Y-%m-%d')\n except ValueError as e:\n print(\"There has been an error parsing the run date. 
Please correct this date '{0}' \"\n \"so that it follows follows the '2011-03-24' date format.\".format(args.run_date))\n sys.exit()\n\n all_employee_dict, supervisor_employee_dict = etl_csv_file(input_file_location)\n\n # Check to see if there was an error parsing the CSV file and if so print it and exit\n if not all_employee_dict:\n print supervisor_employee_dict\n sys.exit()\n\n supervisor_milestone_list, all_employee_dict = generate_milestone_data(\n supervisor_employee_dict,\n all_employee_dict,\n run_date\n )\n non_supervisor_list = []\n\n # Create placeholders for all employees that are not supervisors so they can be printed\n for non_supervisor_id in all_employee_dict:\n non_sv_dict = {}\n non_sv_dict['supervisor_id'] = non_supervisor_id\n non_sv_dict['upcoming_milestones'] = 'No direct reports'\n non_supervisor_list.append(non_sv_dict)\n\n # Combine supervisors with non-supervisors for printing\n final_output_list = supervisor_milestone_list + non_supervisor_list\n\n # # Print out the results\n print ('Plain Text')\n pprint.pprint(final_output_list)", "def main(config):\n input_data = config[\"input_data\"]\n\n # Separate OBS from model datasets\n # (and check there is only one obs dataset)\n obs = [v for v in input_data.values() if v[\"project\"] == \"OBS\"]\n if len(obs) != 1:\n msg = f\"Expected exactly 1 OBS dataset: found {len(obs)}\"\n raise RuntimeError(msg)\n clim_file = obs[0][\"filename\"]\n\n models = group_metadata(\n [v for v in input_data.values() if v[\"project\"] != \"OBS\"],\n \"dataset\")\n\n for model_dataset, group in models.items():\n # 'model_dataset' is the name of the model dataset.\n # 'group' is a list of dictionaries containing metadata.\n logger.info(\"Processing data for %s\", model_dataset)\n model_file = [item[\"filename\"] for item in group]\n\n # Input filenames for provenance\n ancestors = flatten([model_file, clim_file])\n\n # Calculate metrics\n metrics = land_sm_top(clim_file, model_file, model_dataset, config,\n ancestors)\n\n # Write metrics\n metrics_dir = os.path.join(\n config[\"plot_dir\"],\n f\"{config['exp_model']}_vs_{config['control_model']}\",\n config[\"area\"],\n model_dataset,\n )\n\n write_metrics(metrics_dir, metrics, config, ancestors)", "def main():\n\t# import training data\n\tfiles = [INPATH + f for f in os.listdir(INPATH) if \".json\" in f]\n\n\t# import books\n\tprint(\"Loading training data...\")\n\tbookList = loadBooks(files)\n\tprint(\"Load complete.\")\n\n\t# loop through element types and store data structure\n\tfor key, value in ELEMENTS.items():\n\t\tprint(\"Generating: %s\" % key)\n\n\t\t# set file outpath\n\t\toutfile = \"%s.json\" % key\n\t\toutpath = OUTPATH % outfile\n\n\t\tgenerateTrain(bookList, key, value, outpath)", "def main():\n if config.command == \"list-groups\":\n # Get the list of policies in JSON format for the given network\n if hasattr(config, 'accountSwitchKey'):\n groupList = listGroups(config.accountSwitchKey)\n else:\n groupList = listGroups()\n formatOutputGroupList(groupList, config.output_type)\n\n elif config.command == \"list-connectors\":\n if hasattr(config, 'accountSwitchKey'):\n connectorList = listConnectors(config.accountSwitchKey)\n else:\n connectorList = listConnectors()\n formatOutputConnectorList(connectorList, config.output_type)\n\n elif config.command == \"list-products\":\n if hasattr(config, 'accountSwitchKey'):\n productsList = listProducts(config.accountSwitchKey)\n else:\n productsList = listProducts()\n formatOutputProductList(productsList, config.output_type)\n\n elif 
config.command == \"list-stream-types\":\n if hasattr(config, 'accountSwitchKey'):\n streamTypeList = listStreamTypes(config.accountSwitchKey)\n else:\n streamTypeList = listStreamTypes()\n formatOutputStreamTypeList(streamTypeList, config.output_type)\n\n elif config.command == \"list-streams\":\n if hasattr(config, 'accountSwitchKey'):\n streamList = listStreams(config.groupid,config.streamstatus,config.accountSwitchKey)\n else:\n streamList = listStreams(config.groupid,config.streamstatus)\n formatOutputStreamList(streamList, config.output_type)\n\n elif config.command == \"list-properties\":\n if hasattr(config, 'accountSwitchKey'):\n propertiesList = listProperties(config.groupid,config.productId,config.accountSwitchKey)\n else:\n propertiesList = listProperties(config.groupid,config.productId)\n formatOutputPropertiesList(propertiesList, config.output_type)\n\n elif config.command == \"list-error-streams\":\n if hasattr(config, 'accountSwitchKey'):\n errorstreamList = listErrorStreams(config.groupid,config.accountSwitchKey)\n else:\n errorstreamList = listErrorStreams(config.groupid)\n formatOutputErrorStreamList(errorstreamList, config.output_type)\n\n elif config.command == \"create\":\n # Opening JSON file\n f = open(config.file.name,'r')\n data = json.load(f)\n json_string = json.dumps(data) #Very Important since when you read it will be in single quotes, it need to be dumped to json and strings are only valid only in double quotes\n\n if hasattr(config, 'accountSwitchKey'):\n createResponse = createStream(json_string,config.accountSwitchKey)\n else:\n createResponse = createStream(json_string)\n formatOutputActDeactResp(createResponse)\n\n elif config.command == \"update\":\n # Opening JSON file\n f = open(config.file.name,'r')\n data = json.load(f)\n json_string = json.dumps(data) #Very Important since when you read it will be in single quotes, it need to be dumped to json and strings are only valid only in double quotes\n print(json_string)\n if hasattr(config, 'accountSwitchKey'):\n updateResponse = updateStream(json_string,config.streamid,config.accountSwitchKey)\n else:\n updateResponse = updateStream(json_string,config.streamid)\n formatOutputActDeactResp(updateResponse)\n\n\n elif config.command == \"get-stream\":\n if hasattr(config, 'accountSwitchKey'):\n streamDetail = getStream(config.streamid,config.accountSwitchKey)\n else:\n streamDetail = getStream(config.streamid)\n formatOutputStreamDetail(streamDetail, config.output_type)\n\n elif config.command == \"activation-history\":\n if hasattr(config, 'accountSwitchKey'):\n activationHistory = getStreamActHistory(config.streamid,config.accountSwitchKey)\n else:\n activationHistory = getStreamActHistory(config.streamid)\n formatOutputActHistory(activationHistory, config.output_type)\n\n elif config.command == \"stream-history\":\n if hasattr(config, 'accountSwitchKey'):\n streamHistory = getStreamHistory(config.streamid,config.accountSwitchKey)\n else:\n streamHistory = getStreamHistory(config.streamid)\n formatOutputStreamHistory(streamHistory, config.output_type)\n\n elif config.command == \"list-datasets\":\n if hasattr(config, 'accountSwitchKey'):\n datasetList = getDatasets(config.template,config.accountSwitchKey)\n else:\n datasetList = getDatasets(config.template)\n formatOutputDatasetList(datasetList, config.output_type)\n\n elif config.command == \"activate\":\n if hasattr(config, 'accountSwitchKey'):\n activateResponse = activateStream(config.streamid,config.accountSwitchKey)\n else:\n activateResponse = 
activateStream(config.streamid)\n formatOutputActDeactResp(activateResponse)\n\n elif config.command == \"deactivate\":\n if hasattr(config, 'accountSwitchKey'):\n deactivateResponse = deActivateStream(config.streamid,config.accountSwitchKey)\n else:\n deactivateResponse = deActivateStream(config.streamid)\n formatOutputActDeactResp(deactivateResponse)\n\n elif config.command == \"delete\":\n if hasattr(config, 'accountSwitchKey'):\n deleteResponse = deleteStream(config.streamid,config.accountSwitchKey)\n else:\n deleteResponse = deleteStream(config.streamid)\n formatOutputActDeactResp(deleteResponse)", "def main():\n \n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')", "def main():\n # Specify path\n training_filepath = 'data/training.csv'\n testing_filepath = 'data/public_test_features.csv'\n\n # Check whether the specified path exists or not\n isExist = os.path.exists(training_filepath)\n if(isExist):\n print('Reading from ' + training_filepath)\n else:\n print('Training file not found in the app path.')\n exit()\n preprocess_file(training_filepath, 'data/clean_training1.csv', True)\n # Check whether the specified path exists or not\n isExist = os.path.exists(testing_filepath)\n if(isExist):\n print('Reading from ' + testing_filepath)\n else:\n print('Testing file not found in the app path.')\n exit()\n preprocess_file(testing_filepath,'data/clean_testing1.csv', False)", "def main():\n if len(sys.argv) == 4:\n\n messages_path, categories_path, database_path = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_path, categories_path))\n df = load_data(messages_path, categories_path)\n\n print('Cleaning data...')\n df = clean_data(df)\n\n print('Saving data...\\n DATABASE: {}'.format(database_path))\n save_data(df, database_path)\n\n print('Cleaned data saved to database!')\n\n else:\n print('Please provide the filepaths of the messages and categories '\n 'datasets as the first and second argument respectively, as '\n 'well as the filepath of the database to save the cleaned data '\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\n 'disaster_messages.csv disaster_categories.csv '\n 'DisasterResponse.db')", "def main() -> None:\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print(\n \"Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}\".format(\n messages_filepath, categories_filepath\n )\n )\n df = load_data(messages_filepath, categories_filepath)\n\n print(\"Cleaning data...\")\n df = clean_data(df)\n\n print(\"Saving data...\\n DATABASE: {}\".format(database_filepath))\n save_data(df, database_filepath)\n\n print(\"Cleaned data saved to database!\")\n\n else:\n print(\n \"Please provide the filepaths of the messages and categories \"\n \"datasets as the first and second argument respectively, as \"\n \"well as the filepath of the database to save the cleaned data \"\n \"to as the third argument. \\n\\nExample: python process_data.py \"\n \"disaster_messages.csv disaster_categories.csv \"\n \"DisasterResponse.db\"\n )", "def main(unused_argv):\n\n # Read the scene file.\n with open(FLAGS.scene_path, 'r') as file_id:\n scenes = json.load(file_id)\n\n # Read the synonyms file.\n with open(FLAGS.synonym_path, 'r') as file_id:\n synonyms = json.load(file_id)\n sorter = lambda x: len(x[0].split(' '))\n\n # Read the metainformation file.\n with open(FLAGS.metainfo_path, 'r') as file_id:\n gvars.METAINFO = json.load(file_id)\n tag_inv_map = {attr: tag for tag, attr in gvars.METAINFO['tag_map'].items()\n if tag != '<P>'}\n gvars.METAINFO['tag_inv_map'] = tag_inv_map\n gvars.METAINFO['synonym_keys'] = sorted(synonyms.items(),\n key=sorter, reverse=True)\n\n # Add ids to objects.\n scenes = utils.add_object_ids(scenes)\n scenes = utils.clean_object_attributes(scenes)\n\n # Read the caption templates.\n template_paths = os.listdir(FLAGS.caption_template_root)\n cap_templates = []\n for ii in template_paths:\n with open(os.path.join(FLAGS.caption_template_root, ii), 'r') as file_id:\n cur_templates = json.load(file_id)\n cap_templates.extend(cur_templates)\n #utils.pretty_print_templates(cap_templates, 1)\n\n # Read the question templates.\n template_paths = os.listdir(FLAGS.question_template_root)\n ques_templates = []\n for ii in template_paths:\n with open(os.path.join(FLAGS.question_template_root, ii), 'r') as file_id:\n cur_templates = json.load(file_id)\n ques_templates.extend(cur_templates)\n #utils.pretty_print_templates(ques_templates, 1)\n\n # 1. Check if there a scene_id_file specified.\n # 2. 
Check if num_images is -1\n if FLAGS.scene_id_file != '':\n with open(FLAGS.scene_id_file, 'r') as file_id:\n missing_ids = [int(ii.strip('\\n')) for ii in file_id.readlines()]\n print('Dialogs missing for scenes: %d' % len(missing_ids))\n\n # Create a image_index -> scenes list index dictionary\n image_list_id_dict = {ii['image_index']: index\n for index, ii in enumerate(scenes['scenes'])}\n scenes_subset = [scenes['scenes'][image_list_id_dict[scene_id]]\n for scene_id in missing_ids]\n\n elif FLAGS.num_images == -1:\n scenes_subset = scenes['scenes']\n\n else:\n scenes_subset = scenes['scenes'][0: FLAGS.num_images]\n\n # BFS for each scene.\n if FLAGS.num_workers == 1:\n # Single thread version.\n dialogs = []\n for index, scene in enumerate(scenes_subset):\n cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())\n print('Generating [ %s ] [ Worker: %d, Progress: %d/%d Scene: %d ]' %\\\n (cur_time, 0, index, len(scenes_subset), scene['image_index']))\n gen_dialog = generate_dialog_bfs(scene, cap_templates, ques_templates)\n dialogs.append(gen_dialog)\n\n else:\n # Multithread version.\n output_q = multiprocessing.Queue()\n jobs = []\n for worker_id in range(FLAGS.num_workers):\n allotment = scenes_subset[worker_id::FLAGS.num_workers]\n inputs = (allotment, cap_templates, ques_templates)\n inputs += (worker_id, output_q)\n\n process = multiprocessing.Process(target=worker, args=inputs)\n jobs.append(process)\n process.start()\n\n # Wait for all the jobs to finish and collect the output.\n final_results = {}\n for _ in jobs:\n final_results.update(output_q.get())\n for job in jobs:\n job.join()\n\n # Flatten and sort.\n final_results = [jj for _, ii in final_results.items() for jj in ii]\n dialogs = sorted(final_results, key=lambda x: x['image_index'])\n # utils.pretty_print_dialogs(dialogs)\n\n # Save the dialogs.\n print('Saving dialog at: %s' % FLAGS.save_path)\n with open(FLAGS.save_path, 'w') as file_id:\n json.dump(dialogs, file_id)", "def Run():\n file_name = AskForFileName()\n file_content = ReadFileContents(file_name)\n head_list = BuildHeadList(file_content)\n atom_list = BuildAtomList(file_content)\n tail_list = BuildTailList(file_content)\n WriteNewFile(head_list, atom_list, tail_list)", "def run_program():\n print('Biofilm assay selected.')\n x = input('Enter data manually or automatically?')\n\n if str.lower(x) == 'manually':\n z = BiofilmCfuCount(manual_input(), (input('Enter number of conditions:'), input('Enter plated volume:')))\n z.run_and_plot()\n\n elif str.lower(x) == 'automatically':\n y = input('Enter file name with extension:')\n b = r\"C:\\Users\\Andres\\Documents\\HTML\\Smutans\" + '\\\\' + str(y)\n a = BiofilmCfuCount(auto_input(b), (input('Enter number of conditions:'), input('Enter plated volume:')))\n a.run_and_plot()", "def main():\r\n \r\n data_dir = Path.cwd().joinpath('OUTPUT')\r\n config_dir = Path.cwd().joinpath('CONFIG')\r\n \r\n # Load deduplicated comments\r\n data = utils.load(data_dir, 'student_comment_deduplicated')\r\n \r\n # Get the luis API url\r\n with open(config_dir.joinpath('luis_url.txt'), 'r') as f:\r\n luis_url = f.readline()\r\n \r\n request_api(\r\n data,\r\n luis_url,\r\n 1000,\r\n )", "def main():\n\n for i in range(1, 4):\n print(\"\\nSAMPLE INPUT {}\".format(i))\n\n playlist = Playlist()\n\n filename = \"testinput{}.txt\".format(i)\n\n with open(filename, 'r') as testfile:\n operation_list = testfile.read().splitlines()\n\n for line in operation_list:\n operation = line.split(',')\n op_type = operation[0]\n if op_type == 'ADD':\n 
title, artist, genre, is_fav = operation[1:]\n playlist.add(Track(title, artist, genre, is_fav))\n elif op_type == 'DELTITLE':\n title = operation[1]\n playlist.delete_title(title)\n elif op_type == 'DELPOS':\n position = int(operation[1])\n playlist.delete_position(position)\n elif op_type == 'MOVE':\n old_pos, new_pos = int(operation[1]), int(operation[2])\n playlist.move(old_pos, new_pos)\n elif op_type == 'COUNTGENRE':\n genre = operation[1]\n playlist.count_genre(genre)\n elif op_type == 'COUNTFAV':\n playlist.count_favourite()\n elif op_type == 'PRINT':\n playlist.print_playlist()", "def main():\n\n parser = argparse.ArgumentParser(\n description='Convert collection of bibtex files to clean markdown script.')\n parser.add_argument('-i', '--input', nargs='+', required=False,\n help='Input bibtex files. Defaults to bib/*')\n parser.add_argument('-o', '--output', default='publications.md', required=False,\n help='Output markdown floadile.')\n args = parser.parse_args()\n\n # Build setup\n os.makedirs(BUILD_ROOT, exist_ok=True)\n build_dir = os.path.join(\n BUILD_ROOT,\n datetime.now().strftime(datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')))\n os.makedirs(build_dir)\n\n # Find and merge all bibtex files\n input_files = args.input\n if input_files is None:\n input_files = [os.path.join(BIB_ROOT, f) for f in os.listdir(BIB_ROOT)\n if os.path.isfile(os.path.join(BIB_ROOT, f))]\n merge_file = merge_bibtex(input_files, build_dir)\n\n # Parse merged bibtex file with custom options\n parse_file = parse_bibtex(merge_file, build_dir)\n\n # Render bibtex to markdown\n render_file = render_bibtex(parse_file, build_dir, args.output)", "def pipeline_runner():\n # file_parser() # take raw data file and extract columns of interest. remove contaminants.\n entry_parser() # remove duplicates, faulty lines and format the whole thing normally.\n lfq_parser() # replace 0s in lfq reading with random small numbers for t testing purposes\n # open Rstudio and do T testing there\n ROutputFormatter() # reformat R output to something more appealing, add FDR and fold change values", "def main():\n parser = argparse.ArgumentParser(description=\"\"\"\n Converts the class list provided by MSU's Office of the Registrar\n to a format that HackerRank can use to invite candidates.\n \"\"\")\n parser.add_argument('class_list_filename')\n parser.add_argument('hackerrank_candidate_list_filename')\n\n args = parser.parse_args()\n convert_class_list(args.class_list_filename,\n args.hackerrank_candidate_list_filename)", "def collect():\n # catch possible bug with --dashes.md\n os.chdir(pathlib.Path(__file__).resolve().parent / 'test')\n for filename in glob.glob('*.md'):\n stem, _ = os.path.splitext(filename)\n args = ['../readme.py', '--timeout=1', '--', filename]\n result = cmd(args)\n obj = normalize(result['output'])\n tmp = obj['tmp']\n lisp = slurp(tmp) if tmp else None\n yield stem, result['exit_code'], obj['out'], lisp", "def main(args):\n # Results: print to console and also write to output file\n pass" ]
[ "0.714176", "0.6949852", "0.68220896", "0.67618006", "0.66505563", "0.6622678", "0.6581008", "0.6578231", "0.65768015", "0.65547335", "0.6551714", "0.6534173", "0.6517532", "0.6512704", "0.6500041", "0.64553374", "0.6445541", "0.644022", "0.6437129", "0.6432362", "0.64092803", "0.6403037", "0.6399", "0.63958985", "0.638647", "0.637234", "0.6368847", "0.63662267", "0.6365276", "0.63598526", "0.6355312", "0.6332047", "0.6328668", "0.63229483", "0.6322131", "0.6321216", "0.6312393", "0.63055843", "0.6304681", "0.6286935", "0.6276373", "0.627384", "0.6267013", "0.6262687", "0.6257482", "0.6254251", "0.62461853", "0.6238397", "0.622233", "0.6221043", "0.62122476", "0.62107605", "0.62107605", "0.62078804", "0.62071866", "0.62067753", "0.6204125", "0.62022644", "0.6185475", "0.6176094", "0.61750555", "0.61680526", "0.61671096", "0.61635596", "0.6160775", "0.6159932", "0.61550206", "0.61526865", "0.6142207", "0.6141759", "0.6139089", "0.61337113", "0.6126606", "0.612659", "0.6126335", "0.6124383", "0.6116991", "0.6104312", "0.61033344", "0.6101116", "0.609558", "0.60948783", "0.60905343", "0.6079539", "0.60736936", "0.6070415", "0.6067412", "0.6067195", "0.6064183", "0.6060982", "0.60603565", "0.6058798", "0.6055705", "0.60534203", "0.60529095", "0.60490394", "0.60468996", "0.604646", "0.60437524", "0.6037745" ]
0.6324489
33
Get the adjoint for an arbitrary dimension input.
def get_reduced_indices(*indices, axis, keepdims):
    # get all indices
    indices_list = list(indices)
    # list of reduction axis: transform negative indices into positive
    # axis in this list wont exist after the reduction
    axis_list = ft_util.refine_reduce_axis(indices_list, list(axis))
    # get indices after reduction
    if keepdims:
        grad_indices_list = [index_i if i not in axis_list else 0 for i, index_i in enumerate(indices_list)]
    else:
        grad_indices_list = [index_i for i, index_i in enumerate(indices_list) if i not in axis_list]
    grad_ind = tuple(grad_indices_list)
    return grad_ind
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjoint(self: T) -> types.Matrix:", "def adjoint(self):\n if self.domain.field != self.range.field:\n raise NotImplementedError('adjoint not defined since fields '\n 'of domain and range differ ({} != {})'\n ''.format(self.domain.field,\n self.range.field))\n return KMatrixFFT2(self.dist_matrix.conj().T,\n domain=self.range, range=self.domain)", "def adjoint(self):\n return self.cofactorMatrix().transpose()", "def adjoint(self):\n return self.conjugate().transpose()", "def adjoint(self):\n if self.domain.field != self.range.field:\n raise NotImplementedError('adjoint not defined since fields '\n 'of domain and range differ ({} != {})'\n ''.format(self.domain.field,\n self.range.field))\n return KFullMatrix(self.cost_matrix.conj().T,\n domain=self.range, range=self.domain)", "def adjoint(self):\n phi = self.parameters[0]\n dim, _ = self.hyperparameters[\"dimension\"]\n return PCPhase(-1 * phi, dim=dim, wires=self.wires)", "def adjoint(self,add,mod,dat):\n if(mod.shape[0] != self.__nm or dat.shape[0] != self.__nd):\n raise Exception(\"lint adjoint: input shapes do not match those passed to constructor\")\n\n if(add == False):\n mod[:] = 0.0\n\n adjoint_lint(self.__om,self.__dm,self.__nm,self.__nd,self.__crd,mod,dat)", "def adjoint(self): # pragma: no cover\r\n raise NotImplementedError()", "def op_adj(self):\n return AdjointOperator(self.model, save=None, geometry=self.geometry,\n kernel=self.kernel, space_order=self.space_order,\n **self._kwargs)", "def adjoint(self, anm: np.ndarray, nside: Optional[int] = None) -> np.ndarray:\n nside = self.nside if nside is None else nside\n return hp.alm2map(alms=anm, nside=nside, lmax=self.n_max, verbose=self.verbose)", "def adjoint(self, bn: np.ndarray) -> np.ndarray:\n p0 = 0 * self.t + 1\n p1 = self.t.copy()\n\n b = bn[0] * p0 + bn[1] * p1 * 3\n\n for n in np.arange(2, self.n_max + 1):\n p2 = (self.t * p1 * (2 * n - 1) - p0 * (n - 1)) / n\n p0 = p1\n p1 = p2\n b += bn[n] * p2 * (2 * n + 1)\n\n b /= 4 * np.pi\n\n return b", "def adjoint(self):\n data = []\n for i in range(1, self.rows + 1):\n for j in range(1, self.columns + 1):\n data.append(self._cofactor(i, j))\n\n mat = Matrix(self.rows, self.columns, data)\n return mat.transpose()", "def addem(inarr):\n return np.expand_dims(inarr, axis=1)", "def adjoint(self, inputs, outputs):\n for output in outputs:\n np.copyto(output, inputs[0])", "def _adjoint(op):\n if isinstance(op, list):\n adjoint_op = []\n for item in op:\n if isinstance(item, list):\n assert len(item) == 2\n adjoint_op.append([item[0].dag(), item[1]])\n else:\n adjoint_op.append(item.dag())\n return adjoint_op\n else:\n return op.dag()", "def adjoint(self, add, model, data):\n self.checkDomainRange(model, data)\n self.ops[0].adjoint(add, model, data.vecs[0])\n for idx in range(1, self.n):\n self.ops[idx].adjoint(True, model, data.vecs[idx])", "def get_maybe_only_dim(darray, dim):\n if dim is None:\n if len(darray.dims) == 1:\n return darray.dims[0]\n else:\n raise ValueError(\"Specify the dimension\")\n else:\n return dim", "def adjoint(self) -> 'MultiVector':\n # The multivector created by reversing all multiplications\n return self._newMV(self.layout.adjoint_func(self.value))", "def _adj(w):\r\n return (w[:d * d] - w[d * d:]).reshape([d, d])", "def _get_joints(self, anno, idx):\n num_people = len(anno)\n\n joints = np.zeros(\n (num_people, self.ann_info['num_joints'], 3), dtype=np.float32)\n\n for i, obj in enumerate(anno):\n joints[i, :self.ann_info['num_joints'], :3] = \\\n np.array(obj['keypoints']).reshape([-1, 3])\n\n 
img_info = self.coco.loadImgs(self.img_ids[idx])[0]\n orgsize = np.array([img_info['height'], img_info['width'], 1])\n\n return joints, orgsize", "def get_matching_dimname(self, dimname):\n return dimname", "def dimensionality(k: Join):\n d1 = dimensionality(k[0])\n d2 = dimensionality(k[1])\n return _check_and_merge(k, d1, d2)", "def adjacency(self, dim1, dim2):\n\n if dim2 >= dim1:\n raise ValueError(\"\"\"dim2 must be less than dim1.\"\"\")\n if dim2 < 0:\n raise ValueError(\"\"\"dim2 cannot be negative.\"\"\")\n if dim1 > self.dim:\n raise ValueError(\"\"\"dim1 cannot exceed the mesh dimension.\"\"\")\n\n if dim1 == 1:\n if self.dim == 1:\n return self.cell_vertices\n else:\n return self.edge_vertices\n elif dim1 == 2:\n if dim2 == 0:\n return self.cell_vertices\n else:\n return self.cell_edges", "def adjoint(self) -> OperatorBase:\n raise AquaError('Adjoint of a CVaR measurement not defined')", "def get_joints(self, anno: List[Mapping[str, Any]]) -> np.ndarray:\n joints = []\n\n for i, obj in enumerate(anno):\n keypoints = np.array(obj[\"keypoints\"]).reshape([-1, 3])\n joints.append(keypoints)\n\n num_instances = len(joints)\n joints = np.array(joints, dtype=np.float32).reshape((num_instances, self.num_joints, 3))\n return joints", "def adjoint(self, inputs, outputs):\n\n inimg = inputs[0]\n inimg_reshaped = inimg.reshape((inimg.shape[0] * inimg.shape[1], inimg.shape[2]))\n result = np.dot(self.TC.T, inimg_reshaped.T).T.reshape(inimg.shape)\n np.copyto(outputs[0], result)", "def joint_adr(self, joint_name):\n jntadr = mjlib.mj_name2id(self.ptr, C.mjOBJ_JOINT, joint_name)\n assert (jntadr >= 0)\n dofmap = {C.mjJNT_FREE: 7,\n C.mjJNT_BALL: 4,\n C.mjJNT_SLIDE: 1,\n C.mjJNT_HINGE: 1}\n qposadr = self.jnt_qposadr[jntadr][0]\n qveladr = self.jnt_dofadr[jntadr][0]\n dof = dofmap[self.jnt_type[jntadr][0]]\n return (qposadr, qveladr, dof)", "def gap2d(_w_in):\n return nn.AdaptiveAvgPool2d((1, 1))", "def getA(self, idx):\n if isinstance(idx, int):\n return self.dA[[idx]]\n else:\n return self.dA[idx]", "def adj_batch():\n return torch.Tensor([[[1, 3], [3, 1]], [[7, 8], [8, 7]]])", "def adj(self):\n\n d = self.rank\n permutation = [0] * d\n permutation[::2] = range(1, d, 2)\n permutation[1::2] = range(0, d, 2)\n t = np.conj(self._t).transpose(permutation)\n return self.__class__(t)", "def _merge_beam_dim(tensor: Any) ->Any:\n if not isinstance(tensor, torch.Tensor):\n return tensor\n shape = list(tensor.size())\n shape[0] *= shape[1]\n shape.pop(1)\n return tensor.view(tuple(shape))", "def dd_axis(axis, ambient_dim, operand):\n d = Derivative()\n\n unit_vector = np.zeros(ambient_dim)\n unit_vector[axis] = 1\n\n unit_mvector = MultiVector(unit_vector)\n\n return d.resolve(\n (unit_mvector.scalar_product(d.dnabla(ambient_dim)))\n * d(operand))", "def GetLinearDimension(dimension):\r\n pass", "def joint_dataset(l1, l2):\n N = np.max(l1) + 1\n return l2 * N + l1", "def _maybe_expand_dims(x):\n x = tf.convert_to_tensor(x)\n if x.shape == ():\n return tf.expand_dims(x, axis=0)\n return x", "def get_dimension(self, name):\n for dim in self.dimensions:\n if dim.name == name:\n return dim\n\n return None", "def input_dim(use_word_emb, embedding_dim, word_emb_enc_dim):\n return embedding_dim + use_word_emb * word_emb_enc_dim", "def get_joint_number(self) -> int:\n return self.DoF", "def adjacency(self):\n if self.E > 0:\n i = self.edges[:, 0]\n j = self.edges[:, 1]\n adj = coo_matrix((np.ones(self.E), (i, j)),\n shape=(self.V, self.V))\n else:\n adj = coo_matrix((self.V, self.V))\n return adj", "def 
adjoint(self, rec, srca=None, v=None, vp=None, **kwargs):\n # Create a new adjoint source and receiver symbol\n srca = srca or self.geometry.new_src(name='srca', src_type=None)\n\n # Create the adjoint wavefield if not provided\n v = v or TimeFunction(name='v', grid=self.model.grid,\n time_order=2, space_order=self.space_order)\n\n # Pick vp from model unless explicitly provided\n vp = vp or self.model.vp\n\n # Execute operator and return wavefield and receiver data\n summary = self.op_adj().apply(srca=srca, rec=rec, v=v, vp=vp,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n return srca, v, summary", "def get_adj(self, arr, no_agents=5):\n k_lst = [2, 3]\n points = [i[2:4] for i in arr]\n adj = np.ones((no_agents, no_agents), dtype=float)\n tree = cKDTree(points)\n for cnt, row in enumerate(points):\n dd, ii = tree.query(row, k=k_lst)\n adj[cnt][ii] = 1\n adj = np.fill_diagonal(adj, 0)\n return adj", "def get_dimension(self, dim_id):\n for dim in self.dimensions:\n if dim_id == dim.id:\n return dim", "def construct_joint(self, x):\n channel = x.reshape((self._crv_size, self._bound))\n channel /= channel.sum(axis=1, keepdims=True)\n channel[np.isnan(channel)] = self._mask[np.isnan(channel)]\n slc = (len(self._pmf.shape) - 1)*[np.newaxis] + 2*[colon]\n joint = self._pmf[..., np.newaxis] * channel[slc]\n\n return joint", "def _expand(x, ndim, axis=0):\n while F.rank(x) < ndim:\n x = F.expand_dims(x, axis)\n return x", "def joint(G, xs=None):\n vars = G.vars() #: [var]\n facs = { f : G.N(f) for f in G.facs() } #: fac => vars\n\n dims = [G.node[x]['d'] for x in vars] #: [nat]\n _joint = ones(dims)\n\n for vals in itertools.product( *(xrange(d) for d in dims) ): # cartesian product\n _vars = dict(zip(vars,vals)) #: var => val\n vals = tuple(vals) # to index\n #print\n #print _vars\n for fac in facs:\n _vals = [_vars[v] for v in facs[fac]] # keep only fac's vars' vals\n #print '%s%s' % (fac, tuple(_vals))\n _joint[vals] *= G(fac, *_vals)\n\n Z = sum(_joint)\n\n return pd(_joint), Z", "def one_dim_index(self, i, j):\n return int(i + j * self.nx)", "def _getdim(x):\n \n if np.ndim(x) > 1:\n \n dim = x.shape[-1]\n \n else:\n \n dim = 0\n \n return dim", "def bond(self):\r\n return self.A.shape[-1]", "def joint(self):\n return GraphModel(self.factors).joint()", "def with_batch_dim(x: torch.Tensor) -> torch.Tensor:\n if x.dim() == 1:\n return x.unsqueeze(0)\n else:\n return x", "def get_input_dim(self) -> int:\n raise NotImplementedError", "def derivativeX(self, *args):\n if self.n_dims >= 4:\n j = 1\n else:\n j = 0\n if self.i_dim == j:\n return np.ones_like(*args[0])\n else:\n return np.zeros_like(*args[0])", "def adjacency(dist, idx):\n M, k = dist.size()\n # assert M, k == idx.shape\n # assert dist.min() >= 0\n\n # Weights.\n sigma2 = torch.mean(dist[:, -1])**2\n dist = torch.exp(- dist**2 / sigma2)\n\n # Weight matrix.\n I = torch.arange(0, M).repeat_interleave(k).contiguous().view(1, -1).cuda()\n J = idx.contiguous().view(1, -1)\n V = dist.contiguous().view(-1)\n indices = torch.cat([I, J], dim=0)\n W = torch.sparse.FloatTensor(indices, V, torch.Size([M, M])).cuda()\n # W = scipy.sparse.coo_matrix((V.cpu().numpy(), (I.cpu().numpy(), J.cpu().numpy())), shape=(M, M))\n\n # No self-connections.\n # W.setdiag(1)\n\n # Non-directed graph.\n # bigger = W.T > W\n # W = W - W.multiply(bigger) + W.T.multiply(bigger)\n #\n # assert W.nnz % 2 == 0\n # assert np.abs(W - W.T).mean() < 1e-10\n # assert type(W) is scipy.sparse.csr.csr_matrix\n return W", "def _array_name_1D_to_ND(self, name):\n for k, v in 
self._split_arrays.items():\n if name in v:\n return k\n\n generic_match = re.findall(\"^(.+)_[xyz]$\", name)\n if len(generic_match) == 1 and generic_match[0] not in self._split_arrays:\n return generic_match[0]\n\n return None", "def d_input(self):\n return np.reshape(self.next_layer.d_input(), (-1,) + self.input_shape)", "def adjacency(self,kind='e'):\n inv = self.inverse()\n if kind == 'e':\n adj = inv[self].reshape((self.nelems(),-1))\n elif kind == 'n':\n adj = concatenate([where(inv>=0,self[:,i][inv],inv) for i in range(self.nplex())],axis=1)\n else:\n raise ValueError,\"kind should be 'e' or 'n', got %s\" % str(kind) \n return reduceAdjacency(adj)", "def adjoint(self, inputs, outputs):\n super(copy, self).forward(inputs, outputs)", "def Adj(self, vertex_name: n) -> list:\n return self._graph[vertex_name].get_connections()", "def diagonalise(self, input, batch):\n if len(input.size()) == 1:\n return torch.diag(input)\n if len(input.size()) == 2:\n if not batch:\n return torch.diag(vec(input))\n else:\n bdiag = torch.Tensor().to(self.device)\n for i in range(input.size()[0]):\n bdiag = torch.cat((bdiag, torch.diag(input[i]).unsqueeze(0)), axis=0)\n return bdiag\n\n if len(input.size()) == 3 and batch:\n bdiag = torch.Tensor()\n for i in range(input.size()[0]):\n bdiag = torch.cat((bdiag, torch.diag(vec(input[i])).unsqueeze(0)), axis=0)\n\n return bdiag\n else:\n print('Dimension of inpout tensor should only be 1,2,3.')", "def _argmin_or_argmax_grad(x, axis, keep_dims, op, out, dout):\n expand = P.ExpandDims()\n x_shape = F.shape(x)\n x_dim = len(x_shape)\n x_axis = axis\n if x_axis < 0:\n x_axis = axis + x_dim\n onehot_axis = x_axis\n depth = x_shape[x_axis]\n if keep_dims:\n dout_expand = dout[1]\n out = op(x)\n else:\n dout_expand = expand(dout[1], onehot_axis)\n if onehot_axis >= len(shape_op(out[0])):\n onehot_axis = -1\n onehot = P.OneHot(onehot_axis)\n type_x = F.dtype(x)\n on_value = F.cast(F.scalar_to_array(1.0), type_x)\n off_value = F.cast(F.scalar_to_array(0.0), type_x)\n dx = dout_expand * onehot(out[0], depth, on_value, off_value)\n return dx", "def embeddings_layer(x, Wemb, dim_proj):\n\n n_words = x.shape[0]\n n_max_letters_in_word = x.shape[1]\n n_batch = x.shape[2]\n\n dist = Wemb[x.flatten()].reshape([n_words, n_max_letters_in_word, n_batch, dim_proj])\n return dist", "def get_embed(input_data, vocab_size, embed_dim):\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n\n return embed", "def expand_dims(input, axis, _builder=None):\n axis = _constexpr_to_value(axis)\n axes = list(axis) if isinstance(axis, Sequence) else [axis]\n new_ndim = len(input.shape) + len(axes)\n axes = [_wrap_axis(_constexpr_to_value(d), new_ndim) for d in axes]\n\n if len(set(axes)) != len(axes):\n raise ValueError(f\"expand_dims recieved duplicate axes, normalized axes = {axes}\")\n\n ret = input\n for a in sorted(axes):\n ret = semantic.expand_dims(ret, a, _builder)\n return ret", "def diag_indices_from(x1):\n\n x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)\n if x1_desc:\n # original limitation\n if not x1_desc.ndim >= 2:\n pass\n\n # original limitation\n # For more than d=2, the strided formula is only valid for arrays with\n # all dimensions equal, so we check first.\n elif not numpy.alltrue(\n numpy.diff(x1_desc.shape) == 0\n ): # TODO: replace alltrue and diff funcs with dpnp own ones\n pass\n else:\n return dpnp_diag_indices(x1_desc.shape[0], x1_desc.ndim)\n\n return 
call_origin(numpy.diag_indices_from, x1)", "def dim(self):\n\t\treturn self.D", "def _mpo_get_d(self, W):\n din = W.shape[3]\n dout = W.shape[1]\n return dout, din", "def getDimension(unique_name):", "def getDimension(unique_name):", "def one_dim(a: cython.double[:]):\n a[0] *= 2\n return a[0], a.ndim", "def getindex(ndim, ind, strides):\n ret = 0\n for i in range(ndim):\n ret += strides[i] * ind[i]\n return ret", "def getindex(ndim, ind, strides):\n ret = 0\n for i in range(ndim):\n ret += strides[i] * ind[i]\n return ret", "def get_ind(self,*q):\n try:\n if( len(q) == 1 ):\n x = q[0][:,0]\n y = q[0][:,1]\n z = q[0][:,2]\n else:\n x = q[0]\n y = q[1]\n z = q[2]\n try:\n cx = (x+0.5).astype(na.int32)\n cy = (y+0.5).astype(na.int32)\n cz = (z+0.5).astype(na.int32)\n except:\n cx = int(x+0.5)\n cy = int(y+0.5)\n cz = int(z+0.5)\n ind = cx + cy*self.dim[0]+cz*self.dim[0]*self.dim[1]\n return ind\n except Exception as error:\n print(error)\n return None", "def infer_leading_dims(tensor, dim):\n lead_dim = tensor.dim() - dim\n assert lead_dim in (0, 1, 2)\n if lead_dim == 2:\n T, B = tensor.shape[:2]\n else:\n T = 1\n B = 1 if lead_dim == 0 else tensor.shape[0]\n shape = tensor.shape[lead_dim:]\n return lead_dim, T, B, shape", "def fetch_query_dimension(self, dimension_alias):\n for dimension in self._dimensions:\n unwrapped_dimension = find_field_in_modified_field(dimension)\n if unwrapped_dimension.alias == dimension_alias:\n return dimension\n\n return None", "def target(w, z):\n return log_joint(data_dim=data_dim,\n latent_dim=latent_dim,\n num_datapoints=num_datapoints,\n stddv_datapoints=stddv_datapoints,\n w=w, z=z, x=x_train)", "def dim(self):\n return self._d", "def batch_diag_part(in_tensor, batch_size):\n tensor_list = tf.split(split_dim=0, num_split=batch_size, value=in_tensor)\n tensor_list = [tf.expand_dims(tf.diag_part(tf.squeeze(t, [0])), 0) for t in tensor_list]\n return tf.concat(0, tensor_list)", "def get_embed(input_data, vocab_size, embed_dim):\n # todo 需要编程:\n # 1、构建嵌入矩阵的查找表\n lookup_w = tf.Variable(\n initial_value=tf.random_uniform([vocab_size, embed_dim], -1.0, 1.0)\n )\n # 2、获得嵌入输出\n embed = tf.nn.embedding_lookup(params=lookup_w, ids=input_data)\n # [N, n_steps, embed_size]\n return embed", "def _dimensionalize(x: np.ndarray, lb: np.ndarray, ub: np.ndarray) -> np.ndarray:\n return lb + x * (ub - lb)", "def add_joint(joint: str, x1: int, y1: int, x2: int, y2: int) -> str:\n return joint", "def find_adjective(sent):\n adj = None\n for w, p in sent.pos_tags:\n if p == 'JJ': # This is an adjective\n adj = w\n break\n return adj", "def adjaceny_matrix(self):\n \n try:\n return self._adj_matrix\n except AttributeError:\n am = np.zeros((self.n, self.n))\n for edge, weight in self.weights.items():\n am[edge[0], edge[1]] = weight\n self._adj_matrix = am\n return self._adj_matrix", "def DiagonalGate():\n\n def f(x): # pylint: disable=invalid-name\n # x : [batch, 1, length, depth]\n x = jnp.pad(x, [(0, 0), (0, 0), (1, 1), (0, 0)],\n mode='constant', constant_values=0.0)\n depth = x.shape[-1] // 3\n assert 3 * depth == x.shape[-1], ('Depth must be divisible by 3', depth,\n x.shape)\n xs = [\n x[:, :, :-2, :depth], x[:, :, 1:-1, depth:2 * depth],\n x[:, :, 2:, 2 * depth:3 * depth]\n ]\n return jnp.concatenate(xs, axis=3)\n return tl.Fn('DiagonalGate', f)", "def eqconstr(x, problem):\n x, t_final = matrify(x, problem)\n return np.concatenate([problem['dynamics'](x[:, :, i], t_final, problem) for i in range(problem['Nv'])])", "def discriminator(self) -> Any:\r\n return self._lda", 
"def K_op_adjoint(self):\n return self.__K_op_adjoint", "def return_adjacencyList(self):\n return self.__adj", "def get_adv_gradient(self, observation):\n flat_obs = self.observation_space.flatten(observation)\n\n # ignore augmented part of state if necessary\n if self.mask_augmentation:\n flat_obs = flat_obs[:self.zero_gradient_cutoff]\n\n mean_grad, std_grad = [x[0] for x in self._f_grad_dist([flat_obs])]\n\n # zero-out some components if augmenting state\n if not self.mask_augmentation:\n if self.use_dynamics:\n mean_grad[:self.zero_gradient_cutoff] = 0.0\n std_grad[:self.zero_gradient_cutoff] = 0.0\n else:\n mean_grad[self.zero_gradient_cutoff:] = 0.0\n std_grad[self.zero_gradient_cutoff:] = 0.0\n\n return mean_grad + std_grad", "def get_input_dimension(self):\n return self.in_dim", "def _expand_dims(st, axis):\n if not isinstance(st, structured_tensor.StructuredTensor):\n return tf.expand_dims(st, axis)\n nn_axis = _expand_dims_nonnegative_axis(axis, st.rank)\n if st.rank == 0:\n return _expand_dims_scalar(st)\n if nn_axis == 0:\n # Here, we can add a dimension 1 at the front.\n nrows = st.nrows()\n return st.partition_outer_dimension(\n RowPartition.from_uniform_row_length(nrows, nrows))\n elif nn_axis == 1:\n # Again, by partitioning the first dimension into vectors of length 1,\n # we can solve this problem.\n nrows = st.nrows()\n return st.partition_outer_dimension(\n RowPartition.from_uniform_row_length(\n tf.constant(1, dtype=nrows.dtype), nrows))\n else:\n # Note: this is unreachable in the current code.\n raise ValueError(\"Unimplemented: non-negative axis > 1 for _expand_dims\")", "def _array_name_ND_to_1D(self, array_name):\n\n if array_name in self._split_arrays:\n array_name_1D = self._split_arrays[array_name]\n else:\n array_name_1D = [array_name + \"_\" + i for i in ('x', 'y', 'z')]\n\n return array_name_1D", "def get_dn(fn):\n\n # the matrix A in banded format\n diagonals = [np.hstack([0, 0, np.ones(n - 2)]), # zeros aren't used, so can be any value.\n np.hstack([0, -4 * np.ones(n - 2), -2]),\n np.hstack([9, 6 * np.ones(n - 3), [5, 1]]),\n np.hstack([-4 * np.ones(n - 2), -2, 0]), # make sure this -2 is in correct spot\n np.hstack([np.ones(n - 2), 0, 0])] # zeros aren't used, so can be any value.\n A = np.vstack(diagonals) * n ** 3\n\n b = -(1 / n) * np.ones(n)\n\n b[-1] += fn\n\n sol = solve_banded((2, 2), A, b)\n dn = sol[-1]\n\n return dn", "def get_fwd_diag(b):\r\n return [b[0][2], b[1][1], b[2][0]]", "def adjoint_inner_product(bra, ket, x, Q, keys=None):\n \n d = bra.domain.new_field()\n if not keys:\n keys = bra.state.field_dict.keys()\n \n for k in keys:\n if k == 'psi':\n d['g'] += bra.state[k]['g'].conj() * (ket.state['psixx']['g'] - Q**2 * ket.state[k]['g'])\n else:\n d['g'] += bra.state[k]['g'].conj() * ket.state[k]['g']\n \n return d.integrate(x)['g'][0]", "def generate_adjoints(self, adjoints, delta, x):\n x.generate_add_delta(adjoints, delta)", "def embedding_layer(n_categories, embedding_dim, name=None):\n\n input_tensor = Input(shape=(1,))\n x = Embedding(n_categories, embedding_dim, name=name)(input_tensor)\n x = Reshape(target_shape=(embedding_dim,))(x)\n\n return input_tensor, x", "def dimension(self, name: str):\n return BoundDim(self, name)", "def build_embedding_layer(inputs_, vocab_size, embed_size):\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_size), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs_)\n \n return embed", "def get_adjacency_matrix(self):\n m = zeros(self.size)\n perm = self.array_form\n for i in 
xrange(self.size - 1):\n m[perm[i], perm[i + 1]] = 1\n return m", "def get_target_dimension(self):\n return self.out_dim" ]
[ "0.6461707", "0.627058", "0.62327474", "0.6097227", "0.60359955", "0.60107726", "0.598409", "0.5767014", "0.563796", "0.56376064", "0.5606035", "0.5533599", "0.5419022", "0.52535206", "0.5236404", "0.50762033", "0.5064964", "0.5020197", "0.49542275", "0.49254653", "0.49106458", "0.48975813", "0.48926765", "0.48499322", "0.48292878", "0.48191175", "0.4811164", "0.479448", "0.47826773", "0.47821757", "0.47624305", "0.47371304", "0.47345924", "0.47340646", "0.47318137", "0.47262433", "0.4708139", "0.46707627", "0.4667757", "0.46499515", "0.46475056", "0.46460217", "0.46396196", "0.4637414", "0.46155506", "0.46038112", "0.458872", "0.45703033", "0.4550711", "0.45295", "0.4526871", "0.4509784", "0.44975823", "0.4491273", "0.44799408", "0.44741085", "0.44700298", "0.4468107", "0.44569838", "0.44390434", "0.44352263", "0.4430504", "0.4426441", "0.44197878", "0.4419327", "0.44132403", "0.44125137", "0.4409633", "0.4409633", "0.4408293", "0.44080687", "0.44080687", "0.43998963", "0.4399217", "0.43984792", "0.43953913", "0.43911088", "0.43894997", "0.43840227", "0.43672293", "0.43663394", "0.4362463", "0.43586105", "0.43491766", "0.43444905", "0.4325468", "0.43178007", "0.4315674", "0.43146107", "0.42961094", "0.42925242", "0.429001", "0.42843187", "0.42701226", "0.4264512", "0.42617074", "0.4259931", "0.4257423", "0.42554823", "0.4251531", "0.42497322" ]
0.0
-1
Test _arrange_test_result method with only one module.
def test_arrange_test_result_one_module(self):
    pass_1 = self._create_test_result(status=test_runner_base.PASSED_STATUS)
    pass_2 = self._create_test_result(status=test_runner_base.PASSED_STATUS)
    pass_3 = self._create_test_result(status=test_runner_base.PASSED_STATUS)
    fail_1 = self._create_test_result(status=test_runner_base.FAILED_STATUS)
    fail_2 = self._create_test_result(status=test_runner_base.FAILED_STATUS)
    ignore_1 = self._create_test_result(status=test_runner_base.IGNORED_STATUS)
    reporter_1 = result_reporter.ResultReporter()
    reporter_1.all_test_results.extend([pass_1, pass_2, pass_3])
    reporter_2 = result_reporter.ResultReporter()
    reporter_2.all_test_results.extend([fail_1, fail_2, ignore_1])
    info_dict = {}
    aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])
    expect_summary = {aei._STATUS_IGNORED_KEY : 1,
                      aei._STATUS_FAILED_KEY : 2,
                      aei._STATUS_PASSED_KEY : 3}
    self.assertEqual(expect_summary, info_dict[aei._TOTAL_SUMMARY_KEY])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_arrange_test_result_multi_module(self):\n group_a_pass_1 = self._create_test_result(group_name='grpup_a',\n status=test_runner_base.PASSED_STATUS)\n group_b_pass_1 = self._create_test_result(group_name='grpup_b',\n status=test_runner_base.PASSED_STATUS)\n group_c_pass_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.PASSED_STATUS)\n group_b_fail_1 = self._create_test_result(group_name='grpup_b',\n status=test_runner_base.FAILED_STATUS)\n group_c_fail_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.FAILED_STATUS)\n group_c_ignore_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([group_a_pass_1, group_b_pass_1, group_c_pass_1])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([group_b_fail_1, group_c_fail_1, group_c_ignore_1])\n\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 0,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_a_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_a'][aei._SUMMARY_KEY])\n\n expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 1,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_b_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_b'][aei._SUMMARY_KEY])\n\n expect_group_c_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 1,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_c_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_c'][aei._SUMMARY_KEY])\n\n expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])", "def test_arrange_test_result_multi_runner(self):\n runner_a_pass_1 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_a_pass_2 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_a_pass_3 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_b_fail_1 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.FAILED_STATUS)\n runner_b_fail_2 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.FAILED_STATUS)\n runner_b_ignore_1 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.IGNORED_STATUS)\n\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([runner_a_pass_1, runner_a_pass_2, runner_a_pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([runner_b_fail_1, runner_b_fail_2, runner_b_ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 0,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(\n expect_group_a_summary,\n info_dict[aei._TEST_RUNNER_KEY]['runner_a']['someModule'][aei._SUMMARY_KEY])\n\n expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 0}\n self.assertEqual(\n expect_group_b_summary,\n 
info_dict[aei._TEST_RUNNER_KEY]['runner_b']['someModule'][aei._SUMMARY_KEY])\n\n expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])", "def test_module(self):\n pass", "def test_get_results(self):\n pass", "def test_basic_execution(self):", "def test_result_group_can_be_sorted_by_other_metrics(\n self, result_group_roc: ResultGroup, result_1: Result, result_2: Result\n ):\n assert result_group_roc.results == [result_1, result_2]", "def test_package(self):\n pass", "def test_get_scenarios_expanded(self):\n pass", "def test_get_scenarios(self):\n pass", "def after_test(self, test_results):\n pass", "def test_2():", "def test_our_add(self):\n\n # arrange\n x = 2\n y = 3\n expected_result = 5\n\n # act; assert\n self.assertEqual(self.our_module.add(x, y), expected_result)", "def test_composition(self):", "def test_3():", "def pytest_can_run_together(item1, item2):", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "def test_something(self):\n\n allure.dynamic.title(\"Testing compute_ranks\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>Test the function taht organizes a sports league in a \"\n \"round-robin-system. Each team meets all other teams. \"\n \"In your league a win gives a team 2 points, a draw gives \"\n \"both teams 1 point. After some games you have to compute \"\n \"the order of the teams in your league. You use the following \"\n \"criteria to arrange the teams:</p>\"\n \"<ul><li>- Points</li>\"\n \"<li>- Scoring differential (the difference between goals \"\n \"scored and those conceded)</li>\"\n \"<li>- Goals scored</li></ul>\")\n\n test_data = [\n (6,\n [[0, 5, 2, 2],\n [1, 4, 0, 2],\n [2, 3, 1, 2],\n [1, 5, 2, 2],\n [2, 0, 1, 1],\n [3, 4, 1, 1],\n [2, 5, 0, 2],\n [3, 1, 1, 1],\n [4, 0, 2, 0]],\n [4, 4, 6, 3, 1, 2]),\n (6,\n [[0, 5, 2, 0],\n [1, 4, 2, 2],\n [2, 3, 1, 3],\n [1, 5, 0, 0],\n [2, 0, 2, 1],\n [3, 4, 3, 1]],\n [2, 3, 4, 1, 5, 6]),\n (4,\n [[0, 3, 1, 1],\n [1, 2, 2, 2],\n [1, 3, 2, 0],\n [2, 0, 2, 0]],\n [3, 1, 1, 3]),\n (10,\n [],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),\n (8,\n [[0, 7, 2, 0]],\n [1, 2, 2, 2, 2, 2, 2, 8])\n ]\n\n for data in test_data:\n number = data[0]\n games = data[1]\n expected = data[2]\n actual_result = compute_ranks(number, games)\n print_log(number=number,\n games=games,\n expected=expected,\n actual_result=actual_result)\n\n with allure.step(\"Enter a test data and verify the result:\"):\n self.assertEqual(expected, actual_result)", "def runTest(self):\n self.setUp()\n self.test_modul1()", "def run_case(self, **kwargs):\n module_name = kwargs.get('module_name', None)\n if self.result:\n self.success_msg.append('>>>%s PASSED' % module_name or sys.modules[__name__])\n else:\n self.fail_msg.insert(0, '>>>%s FAILED' % module_name or sys.modules[__name__])", "def suite_ended(self, module):", "def test_main(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n result = listdir(dummy_folder,\n full_path=True,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), 
sorted(result))\n\n result = listdir(dummy_folder,\n full_path=False,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=True,\n only_files=True,\n )\n need_result = ['antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n self.assertEqual(sorted(os.listdir('.')), sorted(listdir(path='.', full_path=False)))", "def test_require():", "def getTestResults():", "def test_one():\n run_mergesort([1], [1])", "def test_1():", "def runtest(self):", "def test_modules(self):\n for mod in self.expected_modules:\n try:\n __import__(mod)\n except ImportError:\n raise", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test(self):\n test_dir = join_path(self.test_suite.current_test_cache_dir, self.test_src_dir)\n self.run_test(\n \"sh\",\n [\"testshortsort.sh\"],\n expected=\"Alignments sorted by coordinate.\",\n purpose=\"test: checking alignments\",\n work_dir=test_dir,\n )", "def test_alternate_orderings(self):\r\n t1 = self.task_xml1\r\n t2 = self.task_xml2\r\n xml_to_test = [[t1], [t2], [t1, t1], [t1, t2], [t2, t2], [t2, t1], [t1, t2, t1]]\r\n for xml in xml_to_test:\r\n definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(self.rubric), 'task_xml': xml}\r\n descriptor = Mock(data=definition)\r\n combinedoe = CombinedOpenEndedV1Module(self.test_system,\r\n self.location,\r\n definition,\r\n descriptor,\r\n static_data=self.static_data,\r\n metadata=self.metadata,\r\n instance_state=self.static_data)\r\n\r\n changed = combinedoe.update_task_states()\r\n self.assertFalse(changed)\r\n\r\n combinedoe = CombinedOpenEndedV1Module(self.test_system,\r\n self.location,\r\n definition,\r\n descriptor,\r\n static_data=self.static_data,\r\n metadata=self.metadata,\r\n instance_state={'task_states': TEST_STATE_SA})\r\n\r\n combinedoe = CombinedOpenEndedV1Module(self.test_system,\r\n self.location,\r\n definition,\r\n descriptor,\r\n static_data=self.static_data,\r\n metadata=self.metadata,\r\n instance_state={'task_states': TEST_STATE_SA_IN})", "def unitary_test():", "def test_compare(self):", "def test_list_group(self):\n pass", "def test_4():", "def _check_results(self, caller_info, module_info, results, module_ignore_errors, verbosity):\n if module_ignore_errors:\n return\n\n filename, line_number, function_name, lines, index = caller_info\n caller_str = \"{}::{}#{}\".format(filename, function_name, line_number)\n\n if isinstance(self, AnsibleHosts):\n hosts_str = json.dumps(self.hostnames)\n elif isinstance(self, AnsibleHost):\n hosts_str = json.dumps(self.hostnames[0])\n results = results.get(self.hostnames[0], {})\n else:\n raise TypeError(\"Unsupported type of object: {}\".format(type(self)))\n\n if isinstance(module_info, dict):\n module_names = json.dumps(module_info.get(\"module_name\", \"\"))\n hint_str = \"AnsibleModule::{}\".format(module_names)\n elif isinstance(module_info, list):\n module_names = \", \".join([module_item.get(\"module_name\", \"\") for module_item in module_info])\n hint_str = \"AnsibleModules::{}\".format(json.dumps(module_names))\n else:\n raise TypeError(\"Got {}, expected tuple or list of tuples, tuple items: \"\n \"module_name, module_args, 
module_kwargs, module_attrs\".format(type(module_info)))\n\n err_msg = \"\"\n if verbosity <= 0: # No information of module and result\n err_msg = \"Run ansible module failed\"\n elif verbosity == 1: # Log module name only. Do not log args and result\n err_msg = \"{}: {} -> {} failed\".format(\n caller_str,\n hosts_str,\n hint_str\n )\n elif verbosity >= 2: # Log module name, args and result\n if verbosity == 2:\n indent = None\n elif verbosity >= 3:\n indent = 4\n\n err_msg = \"{}: {} -> {} failed, Results => {}\".format(\n caller_str,\n hosts_str,\n hint_str,\n json.dumps(results, indent=indent)\n )\n\n if isinstance(self, AnsibleHosts):\n if isinstance(module_info, dict):\n failed = any([res[\"failed\"] for res in results.values()])\n else:\n failed = any([any([res[\"failed\"] for res in module_results]) for module_results in results.values()])\n elif isinstance(self, AnsibleHost):\n if isinstance(module_info, dict):\n failed = results[\"failed\"]\n else:\n failed = any([res[\"failed\"] for res in results])\n if failed:\n raise RunAnsibleModuleFailed(err_msg)", "def test_subworkflows_info_in_modules_repo(self):\n self.subworkflow_install.install(\"bam_sort_stats_samtools\")\n mods_info = nf_core.subworkflows.SubworkflowInfo(self.nfcore_modules, \"bam_sort_stats_samtools\")\n mods_info.local = True\n mods_info_output = mods_info.get_component_info()\n console = Console(record=True)\n console.print(mods_info_output)\n output = console.export_text()\n\n assert \"Subworkflow: bam_sort_stats_samtools\" in output\n assert \"Inputs\" in output\n assert \"Outputs\" in output", "def test_get_result_top_files(self):\n pass", "def test_get_result_top_file(self):\n pass", "def test_12(self):\n num_elements = np.random.randint(1, 11)\n\n input_array = np.random.normal(size=num_elements)\n\n # We first check the sorting implementation.\n py = sorted(input_array)\n f90 = fort_debug.wrapper_sorted(input_array, num_elements)\n assert_equal(py, f90)\n\n params_spec, options_spec = generate_random_model()\n respy_obj = RespyCls(params_spec, options_spec)\n\n edu_spec, optim_paras, num_types = dist_class_attributes(\n respy_obj, \"edu_spec\", \"optim_paras\", \"num_types\"\n )\n\n args = (edu_spec[\"start\"], edu_spec[\"share\"], edu_spec[\"max\"])\n f90 = fort_debug.wrapper_sort_edu_spec(*args)\n py = sort_edu_spec(edu_spec)\n for i, label in enumerate([\"start\", \"share\", \"max\"]):\n assert_equal(py[label], f90[i])\n\n py = sort_type_info(optim_paras, num_types)\n f90 = fort_debug.wrapper_sort_type_info(optim_paras[\"type_shares\"], num_types)\n for i, label in enumerate([\"order\", \"shares\"]):\n assert_equal(py[label], f90[i])", "def test_1():\n results = base_tests()\n assert type(results) is list\n assert type(results[0]) is dict\n assert len(results) == 3", "def test_integration2(self):\n self._test_integration(2)", "def _super_run_modified(self, result=None):\n\n orig_result = result\n if result is None:\n result = self.defaultTestResult()\n startTestRun = getattr(result, 'startTestRun', None)\n if startTestRun is not None:\n startTestRun()\n\n result.startTest(self)\n\n testMethod = getattr(self, self._testMethodName)\n if (getattr(self.__class__, \"__unittest_skip__\", False) or\n getattr(testMethod, \"__unittest_skip__\", False)):\n # If the class or method was skipped.\n try:\n skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')\n or getattr(testMethod, '__unittest_skip_why__', ''))\n self._addSkip(result, self, skip_why)\n finally:\n result.stopTest(self)\n return\n 
expecting_failure_method = getattr(testMethod,\n \"__unittest_expecting_failure__\", False)\n expecting_failure_class = getattr(self,\n \"__unittest_expecting_failure__\", False)\n expecting_failure = expecting_failure_class or expecting_failure_method\n outcome = Outcome(result)\n try:\n self._outcome = outcome\n\n with outcome.testPartExecutor(self):\n self.setUp()\n if outcome.success:\n outcome.expecting_failure = expecting_failure\n with outcome.testPartExecutor(self, isTest=True):\n testMethod()\n\n # 当前用例失败时触发on_errors回调\n if not outcome.success:\n with outcome.testPartExecutor(self):\n self.on_errors(outcome.errors)\n\n outcome.expecting_failure = False\n with outcome.testPartExecutor(self):\n self.tearDown()\n\n self.doCleanups()\n for test, reason in outcome.skipped:\n self._addSkip(result, test, reason)\n self._feedErrorsToResult(result, outcome.errors)\n if outcome.success:\n if expecting_failure:\n if outcome.expectedFailure:\n self._addExpectedFailure(result, outcome.expectedFailure)\n else:\n self._addUnexpectedSuccess(result)\n else:\n result.addSuccess(self)\n return result\n finally:\n result.stopTest(self)\n if orig_result is None:\n stopTestRun = getattr(result, 'stopTestRun', None)\n if stopTestRun is not None:\n stopTestRun()\n\n # explicitly break reference cycles:\n # outcome.errors -> frame -> outcome -> outcome.errors\n # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure\n del outcome.errors[:] # equivalent to [].clear in py3\n outcome.expectedFailure = None\n\n # clear the outcome, no more needed\n self._outcome = None", "def test_test_group_parameters(self):\n pass", "def test_get_order(self):\n pass", "def test_sort_cards(a_list, result):\n assert sort_cards(a_list) == result", "def process_module_list(self, modules):", "def main():\n test_merge_quick_sort()\n test_compare()", "def test_4_4_1_1(self):\n pass", "def test_T2():", "def test_T2():", "def test_get_order_items(self):\n pass", "def __call__(self, result=None):\n self._pre_setup()\n super(TestCase, self).__call__(result)\n self._post_tearDown()", "def test_T1():", "def test_T1():", "def inner_test():\n pass", "def inner_test():\n pass", "def sort_results(self):\n pass", "def run_test_module(queue, test_module):\n\n import unittest\n # Import the module\n m = importlib.import_module('.'+test_module, 'test')\n\n\n # initialize the test suite\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n\n # add tests to the test suite\n suite.addTests(loader.loadTestsFromModule(m))\n\n # initialize a runner, pass it your suite and run it\n runner = unittest.TextTestRunner(verbosity=3)\n result = runner.run(suite)\n\n# return(result.testsRun, len(result.failures), len(result.errors))\n queue.put((result.testsRun, len(result.failures), len(result.errors)))", "def test_modules_in_function_return_type_hint_multiple(dependency_testing_model) -> None:\n func: Callable = dependency_testing_model.nested_multiple_returns_hint\n expected_modules = {'urllib3', 'PIL'}\n extracted_modules: Set[str] = md.modules_in_function_signature(func)\n assert extracted_modules == expected_modules", "def test_get_result_top_dir(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def assertResults(self, expected, result, deduped=False):\n self.assertEqual([u'shards'], result.keys())\n self.assertEqual(1, len(result[u'shards']))\n self.assertTrue(result[u'shards'][0], result)\n result = result[u'shards'][0].copy()\n self.assertFalse(result.get(u'abandoned_ts'))\n 
bot_version = result.pop(u'bot_version')\n self.assertTrue(bot_version)\n if result.get(u'costs_usd') is not None:\n expected.pop(u'costs_usd', None)\n self.assertLess(0, result.pop(u'costs_usd'))\n if result.get(u'cost_saved_usd') is not None:\n expected.pop(u'cost_saved_usd', None)\n self.assertLess(0, result.pop(u'cost_saved_usd'))\n self.assertTrue(result.pop(u'created_ts'))\n self.assertTrue(result.pop(u'completed_ts'))\n self.assertLess(0, result.pop(u'duration'))\n task_id = result.pop(u'task_id')\n run_id = result.pop(u'run_id')\n self.assertTrue(task_id)\n self.assertTrue(task_id.endswith('0'), task_id)\n if not deduped:\n self.assertEqual(task_id[:-1] + '1', run_id)\n self.assertTrue(result.pop(u'bot_idle_since_ts'))\n self.assertTrue(result.pop(u'modified_ts'))\n self.assertTrue(result.pop(u'started_ts'))\n\n if getattr(expected.get(u'output'), 'match', None):\n expected_output = expected.pop(u'output')\n output = result.pop('output')\n self.assertTrue(\n expected_output.match(output),\n '%s does not match %s' % (output, expected_output.pattern))\n\n # Bot python version may be different.\n result[u'bot_dimensions'] = sorted(\n [d for d in result[u'bot_dimensions'] if not d['key'] == 'python'])\n\n self.assertEqual(expected, result)\n return bot_version", "def test_importer_returns_tests():\n flowtask = FlowTaskFactory()\n flowtask.build_flow.build.org = OrgFactory()\n with temporary_dir() as output_dir:\n copyfile(\n TEST_ROBOT_OUTPUT_FILES / \"robot_with_failures.xml\",\n Path(output_dir) / \"output.xml\",\n )\n actual = robot_importer.import_robot_test_results(flowtask, output_dir)\n expected = [\n {\n \"name\": \"Passing test\",\n \"group\": \"Robot Fail\",\n \"status\": \"Pass\",\n \"start_time\": \"2020-06-23T18:49:20.955000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.956000+00:00\",\n \"exception\": \"Life is good, yo.\",\n \"doc\": \"\",\n \"tags\": [\"tag one\", \"tag two\"],\n },\n {\n \"name\": \"Failing test 1\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:20.957000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.960000+00:00\",\n \"exception\": \"Danger, Will Robinson!\",\n \"doc\": \"A test that fails with a keyword directly in the test\",\n \"tags\": [],\n },\n {\n \"name\": \"Failing test 2\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:20.960000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.963000+00:00\",\n \"doc\": \"A test that fails due to a failure in a lower level keyword.\",\n \"exception\": \"I'm sorry, Dave. 
I'm afraid I can't do that.\",\n \"tags\": [],\n },\n {\n \"name\": \"Failing test 3\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:21.017000+00:00\",\n \"end_time\": \"2020-06-23T18:49:21.024000+00:00\",\n \"exception\": (\n \"Several failures occurred:\\n\\n\"\n \" 1) First failure\\n\\n\"\n \" 2) Second failure\"\n ),\n \"doc\": \"A test that has multiple keyword failures\",\n \"tags\": [],\n },\n ]\n assert actual == expected", "def test_suite():\n test(sum_upto_first_even([1,3,2]),4)\n test(sum_upto_first_even([1,3,3]),7)\n test(sum_upto_first_even([2,3,3]),0)", "def test_something():", "def pytest_after_group_items(session, config, items):", "def test_setup_module(self):\n pluggable_package.setup(self._test_module)\n self._test_setup(self._test_module)", "def TestOneStep(self):\n pass", "def test_02_visit_again(self):", "def test_T3():", "def test_T3():", "def test_apply_endorsements(self):", "def runTests(self):\n \n pass", "def tests():", "def spec_tests():\n pass", "def test_collect_scripts_depends_on_integration_with_items(\n self, dependency_integration_command, expected_result, module_repo\n ):\n test_input = [\n {\n \"DummyScript\": {\n \"name\": \"DummyScript\",\n \"file_path\": \"dummy_path\",\n \"depends_on\": [dependency_integration_command],\n \"pack\": \"dummy_pack\",\n }\n }\n ]\n\n found_result, found_items = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_result[0]\n assert found_items == expected_result[1]", "def test_000_add_group(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def _test(self, **kwargs):\n raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")", "def test_get_run(self):\n pass", "def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def test_by_order(self):\n # addon_executor = AddonExecutor(execute_order, stop_order)\n # self.assertEqual(expected, addon_executor.execute_with_order(addon, execute_order, stop_order))\n self.run_mgr.by_order(self.cli_inst, ['execute', 'start'], ['stop'])\n output = self._get_lines_as_list(sys.stdout)\n\n self.assertTrue(output[0].startswith('Execute'))\n self.assertTrue(output[1].startswith('Start'))\n self.assertTrue(output[2].startswith('Stop'))", "def test(self):\n pass", "def test_returns_sorted_projects_by_difficulty_if_sort_by_set_to_difficulty(self):\n # Arrange\n # Set difficulty of test_project_1 to easy.\n self.test_project_1.difficulty = ProjectDifficulty.EASY.value\n self.test_project_1.save()\n # Set project_2 to be allowed for all users removing as private.\n self.test_project_2.private = False\n self.test_project_2.difficulty = ProjectDifficulty.MODERATE.value\n self.test_project_2.save()\n # Set difficulty of test_project_1 to hard and status to published.\n self.test_project_3.status = ProjectStatus.PUBLISHED.value\n self.test_project_3.difficulty = ProjectDifficulty.CHALLENGING.value\n self.test_project_3.save()\n\n # Test for descending order\n # Act\n response_desc = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"orderBy\": \"difficulty\", 
\"orderByType\": \"DESC\"},\n )\n # Assert\n self.assertEqual(response_desc.status_code, 200)\n self.assertEqual(len(response_desc.json[\"results\"]), 3)\n expected_desc_order = [\n self.test_project_3.id,\n self.test_project_2.id,\n self.test_project_1.id,\n ]\n self.assertListEqual(\n [i[\"projectId\"] for i in response_desc.json[\"results\"]], expected_desc_order\n )\n\n # Test for ascending order\n # Act\n response_asc = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"orderBy\": \"difficulty\", \"orderByType\": \"ASC\"},\n )\n # Assert\n self.assertEqual(response_asc.status_code, 200)\n self.assertEqual(len(response_asc.json[\"results\"]), 3)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_asc.json[\"results\"]],\n expected_desc_order[::-1],\n )", "def test(self):", "def test(self):", "def test_import_allows_multiple_modules_successful(self):\n # Deliberately using modules that will already be imported to avoid side effects.\n feature = LazyImportTester([\"site\", \"sys\"])\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n self.assertTrue(feature)\n check.assert_called_once()", "def test_T01():", "def test_patch_group(self):\n pass", "def test_patch_group(self):\n pass", "def test_returns_sorted_projects_by_priority_if_sort_by_set_to_priority(self):\n # Arrange\n # Set priority of test_project_1 to urgent.\n self.test_project_1.priority = ProjectPriority.URGENT.value\n self.test_project_1.save()\n # Set project_2 to be allowed for all users removing as private.\n self.test_project_2.private = False\n self.test_project_2.priority = ProjectPriority.HIGH.value\n self.test_project_2.save()\n # Set priority of test_project_1 to low and status to published.\n self.test_project_3.status = ProjectStatus.PUBLISHED.value\n self.test_project_3.priority = ProjectPriority.MEDIUM.value\n self.test_project_3.save()\n test_project_4 = Project.clone(self.test_project_2.id, self.test_author.id)\n test_project_4.status = ProjectStatus.PUBLISHED.value\n test_project_4.priority = ProjectPriority.LOW.value\n test_project_4.save()\n\n # Test for descending order\n # Act\n response_desc = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"orderBy\": \"priority\", \"orderByType\": \"DESC\"},\n )\n # Assert\n self.assertEqual(response_desc.status_code, 200)\n self.assertEqual(len(response_desc.json[\"results\"]), 4)\n expected_desc_order = [\n test_project_4.id,\n self.test_project_3.id,\n self.test_project_2.id,\n self.test_project_1.id,\n ]\n self.assertListEqual(\n [i[\"projectId\"] for i in response_desc.json[\"results\"]], expected_desc_order\n )\n\n # Test for ascending order\n # Act\n response_asc = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"orderBy\": \"priority\", \"orderByType\": \"ASC\"},\n )\n # Assert\n self.assertEqual(response_asc.status_code, 200)\n self.assertEqual(len(response_asc.json[\"results\"]), 4)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_asc.json[\"results\"]],\n expected_desc_order[::-1],\n )", "def test_get_results_verbose(self):\n\t\tpass", "def test_generate_all_testing(self):\n pass", "def required_tool_results():\n return [MethylResultModule]", "def test_results_are_sorted(self, data_flow_api_client):\n response = data_flow_api_client.get(self.view_url)\n assert response.status_code == status.HTTP_200_OK\n\n results = response.json()['results']\n\n assert 
results == sorted(results, key=lambda t: t['id'])" ]
[ "0.7826515", "0.6627111", "0.6378898", "0.6037766", "0.58032465", "0.5796728", "0.57169634", "0.5695606", "0.5693893", "0.56901085", "0.5688211", "0.5681043", "0.5648296", "0.56379336", "0.5626959", "0.56251144", "0.56239635", "0.5623645", "0.5619237", "0.5617106", "0.56157196", "0.5597624", "0.55765915", "0.55617803", "0.55575514", "0.5538702", "0.5538075", "0.5525763", "0.5525763", "0.5525763", "0.5525763", "0.5525763", "0.55045", "0.5496916", "0.5496806", "0.54862446", "0.5476667", "0.54763955", "0.54667383", "0.54634464", "0.54573643", "0.54403955", "0.5436156", "0.5420003", "0.5413926", "0.5412421", "0.5398223", "0.53973037", "0.53927284", "0.5381741", "0.5379564", "0.53794193", "0.5377684", "0.5377684", "0.53762674", "0.5370803", "0.53701144", "0.53701144", "0.5369682", "0.5369682", "0.53649217", "0.53567845", "0.53537965", "0.5348977", "0.5346165", "0.5346165", "0.5346165", "0.53439176", "0.5337755", "0.5332605", "0.53312784", "0.53269786", "0.5323578", "0.53233075", "0.53210574", "0.5316994", "0.5316994", "0.53158206", "0.5313424", "0.5313317", "0.5308411", "0.5303565", "0.529927", "0.52975243", "0.5293326", "0.5292703", "0.5291224", "0.528638", "0.5281398", "0.5279178", "0.5279178", "0.5276973", "0.5270952", "0.526947", "0.526947", "0.5265708", "0.5263768", "0.52614456", "0.5253967", "0.52398163" ]
0.8463349
0
Test the _arrange_test_result method with multiple modules.
def test_arrange_test_result_multi_module(self): group_a_pass_1 = self._create_test_result(group_name='grpup_a', status=test_runner_base.PASSED_STATUS) group_b_pass_1 = self._create_test_result(group_name='grpup_b', status=test_runner_base.PASSED_STATUS) group_c_pass_1 = self._create_test_result(group_name='grpup_c', status=test_runner_base.PASSED_STATUS) group_b_fail_1 = self._create_test_result(group_name='grpup_b', status=test_runner_base.FAILED_STATUS) group_c_fail_1 = self._create_test_result(group_name='grpup_c', status=test_runner_base.FAILED_STATUS) group_c_ignore_1 = self._create_test_result(group_name='grpup_c', status=test_runner_base.IGNORED_STATUS) reporter_1 = result_reporter.ResultReporter() reporter_1.all_test_results.extend([group_a_pass_1, group_b_pass_1, group_c_pass_1]) reporter_2 = result_reporter.ResultReporter() reporter_2.all_test_results.extend([group_b_fail_1, group_c_fail_1, group_c_ignore_1]) info_dict = {} aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2]) expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0, aei._STATUS_FAILED_KEY : 0, aei._STATUS_PASSED_KEY : 1} self.assertEqual( expect_group_a_summary, info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_a'][aei._SUMMARY_KEY]) expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 0, aei._STATUS_FAILED_KEY : 1, aei._STATUS_PASSED_KEY : 1} self.assertEqual( expect_group_b_summary, info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_b'][aei._SUMMARY_KEY]) expect_group_c_summary = {aei._STATUS_IGNORED_KEY : 1, aei._STATUS_FAILED_KEY : 1, aei._STATUS_PASSED_KEY : 1} self.assertEqual( expect_group_c_summary, info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_c'][aei._SUMMARY_KEY]) expect_total_summary = {aei._STATUS_IGNORED_KEY : 1, aei._STATUS_FAILED_KEY : 2, aei._STATUS_PASSED_KEY : 3} self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_arrange_test_result_one_module(self):\n pass_1 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_2 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_3 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n fail_1 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n fail_2 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n ignore_1 = self._create_test_result(status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([pass_1, pass_2, pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([fail_1, fail_2, ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_summary, info_dict[aei._TOTAL_SUMMARY_KEY])", "def test_arrange_test_result_multi_runner(self):\n runner_a_pass_1 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_a_pass_2 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_a_pass_3 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_b_fail_1 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.FAILED_STATUS)\n runner_b_fail_2 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.FAILED_STATUS)\n runner_b_ignore_1 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.IGNORED_STATUS)\n\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([runner_a_pass_1, runner_a_pass_2, runner_a_pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([runner_b_fail_1, runner_b_fail_2, runner_b_ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 0,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(\n expect_group_a_summary,\n info_dict[aei._TEST_RUNNER_KEY]['runner_a']['someModule'][aei._SUMMARY_KEY])\n\n expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 0}\n self.assertEqual(\n expect_group_b_summary,\n info_dict[aei._TEST_RUNNER_KEY]['runner_b']['someModule'][aei._SUMMARY_KEY])\n\n expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])", "def test_get_results(self):\n pass", "def test_something(self):\n\n allure.dynamic.title(\"Testing compute_ranks\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>Test the function taht organizes a sports league in a \"\n \"round-robin-system. Each team meets all other teams. \"\n \"In your league a win gives a team 2 points, a draw gives \"\n \"both teams 1 point. After some games you have to compute \"\n \"the order of the teams in your league. 
You use the following \"\n \"criteria to arrange the teams:</p>\"\n \"<ul><li>- Points</li>\"\n \"<li>- Scoring differential (the difference between goals \"\n \"scored and those conceded)</li>\"\n \"<li>- Goals scored</li></ul>\")\n\n test_data = [\n (6,\n [[0, 5, 2, 2],\n [1, 4, 0, 2],\n [2, 3, 1, 2],\n [1, 5, 2, 2],\n [2, 0, 1, 1],\n [3, 4, 1, 1],\n [2, 5, 0, 2],\n [3, 1, 1, 1],\n [4, 0, 2, 0]],\n [4, 4, 6, 3, 1, 2]),\n (6,\n [[0, 5, 2, 0],\n [1, 4, 2, 2],\n [2, 3, 1, 3],\n [1, 5, 0, 0],\n [2, 0, 2, 1],\n [3, 4, 3, 1]],\n [2, 3, 4, 1, 5, 6]),\n (4,\n [[0, 3, 1, 1],\n [1, 2, 2, 2],\n [1, 3, 2, 0],\n [2, 0, 2, 0]],\n [3, 1, 1, 3]),\n (10,\n [],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),\n (8,\n [[0, 7, 2, 0]],\n [1, 2, 2, 2, 2, 2, 2, 8])\n ]\n\n for data in test_data:\n number = data[0]\n games = data[1]\n expected = data[2]\n actual_result = compute_ranks(number, games)\n print_log(number=number,\n games=games,\n expected=expected,\n actual_result=actual_result)\n\n with allure.step(\"Enter a test data and verify the result:\"):\n self.assertEqual(expected, actual_result)", "def test_module(self):\n pass", "def test_result_group_can_be_sorted_by_other_metrics(\n self, result_group_roc: ResultGroup, result_1: Result, result_2: Result\n ):\n assert result_group_roc.results == [result_1, result_2]", "def test_composition(self):", "def test_basic_execution(self):", "def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def test_3():", "def test_list_group(self):\n pass", "def main():\n test_merge_quick_sort()\n test_compare()", "def runTest(self):\n self.setUp()\n self.test_modul1()", "def test_get_scenarios(self):\n pass", "def getTestResults():", "def test_get_scenarios_expanded(self):\n pass", "def test_one():\n run_mergesort([1], [1])", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "def runtest(self):", "def after_test(self, test_results):\n pass", "def process_module_list(self, modules):", "def test_subworkflows_info_in_modules_repo(self):\n self.subworkflow_install.install(\"bam_sort_stats_samtools\")\n mods_info = nf_core.subworkflows.SubworkflowInfo(self.nfcore_modules, \"bam_sort_stats_samtools\")\n mods_info.local = True\n mods_info_output = mods_info.get_component_info()\n console = Console(record=True)\n console.print(mods_info_output)\n output = console.export_text()\n\n assert \"Subworkflow: bam_sort_stats_samtools\" in output\n assert \"Inputs\" in output\n assert \"Outputs\" in output", "def test_2():", "def runTests(self):\n \n pass", "def test():\n\t\treturn [\"vice.multizone\",\n\t\t\t[\n\t\t\t\ttest_from_output(),\n\t\t\t\tmig_matrix_row.test(run = False),\n\t\t\t\tmig_matrix.test(run = False),\n\t\t\t\tmig_specs.test(run = False),\n\t\t\t\tzone_array.test(run = False),\n\t\t\t\t_multizone.test(run = False),\n\t\t\t\tsrc_test(run = False)\n\t\t\t]\n\t\t]", "def test_test_group_parameters(self):\n pass", "def master_test_suite( pkg_mod_iter ):\n master_suite= unittest.TestSuite()\n for package, module_iter in pkg_mod_iter:\n for filename, module in module_iter:\n print( package+\".\"+module )\n suite= doctest.DocTestSuite( package+\".\"+module )\n print( \" \", suite )\n master_suite.addTests( suite )\n runner= unittest.TextTestRunner( verbosity=1 )\n runner.run( master_suite )", "def test_modules_in_function_return_type_hint_multiple(dependency_testing_model) -> None:\n func: Callable = 
dependency_testing_model.nested_multiple_returns_hint\n expected_modules = {'urllib3', 'PIL'}\n extracted_modules: Set[str] = md.modules_in_function_signature(func)\n assert extracted_modules == expected_modules", "def pytest_after_group_items(session, config, items):", "def do_test():\n for x in execute_helper(test_info,crossmap_tests):\n yield x", "def test_main(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n result = listdir(dummy_folder,\n full_path=True,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=False,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=True,\n only_files=True,\n )\n need_result = ['antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n self.assertEqual(sorted(os.listdir('.')), sorted(listdir(path='.', full_path=False)))", "def test(self):\n test_dir = join_path(self.test_suite.current_test_cache_dir, self.test_src_dir)\n self.run_test(\n \"sh\",\n [\"testshortsort.sh\"],\n expected=\"Alignments sorted by coordinate.\",\n purpose=\"test: checking alignments\",\n work_dir=test_dir,\n )", "def test_alternate_orderings(self):\r\n t1 = self.task_xml1\r\n t2 = self.task_xml2\r\n xml_to_test = [[t1], [t2], [t1, t1], [t1, t2], [t2, t2], [t2, t1], [t1, t2, t1]]\r\n for xml in xml_to_test:\r\n definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(self.rubric), 'task_xml': xml}\r\n descriptor = Mock(data=definition)\r\n combinedoe = CombinedOpenEndedV1Module(self.test_system,\r\n self.location,\r\n definition,\r\n descriptor,\r\n static_data=self.static_data,\r\n metadata=self.metadata,\r\n instance_state=self.static_data)\r\n\r\n changed = combinedoe.update_task_states()\r\n self.assertFalse(changed)\r\n\r\n combinedoe = CombinedOpenEndedV1Module(self.test_system,\r\n self.location,\r\n definition,\r\n descriptor,\r\n static_data=self.static_data,\r\n metadata=self.metadata,\r\n instance_state={'task_states': TEST_STATE_SA})\r\n\r\n combinedoe = CombinedOpenEndedV1Module(self.test_system,\r\n self.location,\r\n definition,\r\n descriptor,\r\n static_data=self.static_data,\r\n metadata=self.metadata,\r\n instance_state={'task_states': TEST_STATE_SA_IN})", "def run_test_suite( pkg_mod_iter ):\n for package, module_iter in pkg_mod_iter:\n print( package )\n print( \"=\"*len(package ) )\n print()\n for filename, module in module_iter:\n suite= doctest.DocTestSuite( package+\".\"+module )\n runner= unittest.TextTestRunner( verbosity=1 )\n runner.run( suite )", "def run_test_module(queue, test_module):\n\n import unittest\n # Import the module\n m = importlib.import_module('.'+test_module, 'test')\n\n\n # initialize the test suite\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n\n # add tests to the test suite\n suite.addTests(loader.loadTestsFromModule(m))\n\n # initialize a runner, pass it your suite and run it\n runner = unittest.TextTestRunner(verbosity=3)\n result = runner.run(suite)\n\n# 
return(result.testsRun, len(result.failures), len(result.errors))\n queue.put((result.testsRun, len(result.failures), len(result.errors)))", "def test_4():", "def tests():", "def test_1():", "def test_12(self):\n num_elements = np.random.randint(1, 11)\n\n input_array = np.random.normal(size=num_elements)\n\n # We first check the sorting implementation.\n py = sorted(input_array)\n f90 = fort_debug.wrapper_sorted(input_array, num_elements)\n assert_equal(py, f90)\n\n params_spec, options_spec = generate_random_model()\n respy_obj = RespyCls(params_spec, options_spec)\n\n edu_spec, optim_paras, num_types = dist_class_attributes(\n respy_obj, \"edu_spec\", \"optim_paras\", \"num_types\"\n )\n\n args = (edu_spec[\"start\"], edu_spec[\"share\"], edu_spec[\"max\"])\n f90 = fort_debug.wrapper_sort_edu_spec(*args)\n py = sort_edu_spec(edu_spec)\n for i, label in enumerate([\"start\", \"share\", \"max\"]):\n assert_equal(py[label], f90[i])\n\n py = sort_type_info(optim_paras, num_types)\n f90 = fort_debug.wrapper_sort_type_info(optim_paras[\"type_shares\"], num_types)\n for i, label in enumerate([\"order\", \"shares\"]):\n assert_equal(py[label], f90[i])", "def run_combined(self):\n self.runtest_autokey()\n self.runtest_mediaresource()\n self.runtest_composite_slug()\n self.runtest_all_types()\n self.runtest_complex_types()\n self.runtest_only_key()\n self.runtest_compound_key()\n self.runtest_simple_select()\n self.runtest_paging()\n self.runtest_nav_o2o()\n self.runtest_nav_o2o_1()\n self.runtest_nav_zo2o()\n self.runtest_nav_zo2o_f()\n self.runtest_nav_zo2o_b()\n self.runtest_nav_many2o()\n self.runtest_nav_many2o_f()\n self.runtest_nav_many2o_b()\n self.runtest_nav_many2zo()\n self.runtest_nav_many2zo_f()\n self.runtest_nav_many2zo_b()\n self.runtest_nav_many2zo_r()\n self.runtest_nav_many2zo_rf()\n self.runtest_nav_many2zo_rb()\n self.runtest_nav_many2many()\n self.runtest_nav_many2many_1()\n self.runtest_nav_many2many_r()\n self.runtest_nav_many2many_r1()", "def test_T3():", "def test_T3():", "def test_our_add(self):\n\n # arrange\n x = 2\n y = 3\n expected_result = 5\n\n # act; assert\n self.assertEqual(self.our_module.add(x, y), expected_result)", "def test_sort_cards(a_list, result):\n assert sort_cards(a_list) == result", "def _test(self, **kwargs):\n raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")", "def test_1():\n results = base_tests()\n assert type(results) is list\n assert type(results[0]) is dict\n assert len(results) == 3", "def test_all():\n test_get_to()\n test_error_type()\n test_exchange()\n print(\"All tests passed.\")", "def run_case(self, **kwargs):\n module_name = kwargs.get('module_name', None)\n if self.result:\n self.success_msg.append('>>>%s PASSED' % module_name or sys.modules[__name__])\n else:\n self.fail_msg.insert(0, '>>>%s FAILED' % module_name or sys.modules[__name__])", "def test_batch(self):\n pass", "def test_by_order(self):\n # addon_executor = AddonExecutor(execute_order, stop_order)\n # self.assertEqual(expected, addon_executor.execute_with_order(addon, execute_order, stop_order))\n self.run_mgr.by_order(self.cli_inst, ['execute', 'start'], ['stop'])\n output = self._get_lines_as_list(sys.stdout)\n\n self.assertTrue(output[0].startswith('Execute'))\n self.assertTrue(output[1].startswith('Start'))\n self.assertTrue(output[2].startswith('Stop'))", "def exe_tests(self):\n self.rank = mpicom.rank()\n self.size = mpicom.size()\n if mpicom.parallel():\n 
self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpicom.so\")\n else:\n self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpistub.pyc\")\n self.test_broadcast()\n self.test_reduce()\n self.test_p2p()\n self.test_gather()\n self.test_scatter()\n #self.test_alltoall()", "def test_integration3(self):\n self._test_integration(3)", "def __call__(self, result=None):\n self._pre_setup()\n super(TestCase, self).__call__(result)\n self._post_tearDown()", "def pytest_can_run_together(item1, item2):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test_generate_all_testing(self):\n pass", "def test_insertSort3(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_3[0]),self.test_3[1])", "def test_order_by(self):\n manifestb = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='2.0.0')\n job_type1b = job_test_utils.create_seed_job_type(manifest=manifestb)\n job_test_utils.create_job(job_type=job_type1b, status='RUNNING')\n\n manifestc = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='3.0.0')\n job_type1c = job_test_utils.create_seed_job_type(manifest=manifestc)\n job_test_utils.create_job(job_type=job_type1c, status='RUNNING')\n\n url = '/%s/jobs/?is_superseded=false&order=job_type__name&order=-job_type__version' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)\n\n self.assertEqual(result['results'][0]['job_type']['id'], job_type1c.id)\n self.assertEqual(result['results'][1]['job_type']['id'], job_type1b.id)\n self.assertEqual(result['results'][2]['job_type']['id'], self.job_type1.id)\n self.assertEqual(result['results'][3]['job_type']['id'], self.job_type2.id)", "def test_modules(self):\n for mod in self.expected_modules:\n try:\n __import__(mod)\n except ImportError:\n raise", "def test_T01():", "def main():\n modtype = input( \"test iterative (i) or recursive (r) module: \" )\n if modtype.lower().strip() == \"i\":\n import myListIter \n listmodule = myListIter\n print( 'iter' )\n elif modtype.lower().strip() == \"r\":\n import myListRec \n listmodule = myListRec\n print( 'rec' )\n else:\n print( \"Please enter 'i' or 'r' to test iterative/recursive library.\" )\n return\n testAppendAndToString( listmodule )\n testClear( listmodule )\n testInsert( listmodule )\n testGet( listmodule )\n testSet( listmodule )\n testPop( listmodule )\n testIndex( listmodule )\n testCursor( listmodule ) \n\n #testClone( listmodule )\n #testExtend( listmodule )\n testRemove( listmodule )\n testCount( listmodule )\n testPyListToMyList( listmodule )\n testMyListToPyList( listmodule )\n print()", "def test_suite():\n test(sum_upto_first_even([1,3,2]),4)\n test(sum_upto_first_even([1,3,3]),7)\n test(sum_upto_first_even([2,3,3]),0)", "def test_T2():", "def test_T2():", "def inner_test():\n pass", "def inner_test():\n pass", "def test_T1():", "def test_T1():", "def test_get_result_top_files(self):\n pass", "def test_integration2(self):\n self._test_integration(2)", "def test_returns_sorted_projects_by_priority_if_sort_by_set_to_priority(self):\n # Arrange\n # Set priority of test_project_1 to urgent.\n self.test_project_1.priority = ProjectPriority.URGENT.value\n self.test_project_1.save()\n # Set project_2 to be allowed for all users removing as private.\n self.test_project_2.private = False\n 
self.test_project_2.priority = ProjectPriority.HIGH.value\n self.test_project_2.save()\n # Set priority of test_project_1 to low and status to published.\n self.test_project_3.status = ProjectStatus.PUBLISHED.value\n self.test_project_3.priority = ProjectPriority.MEDIUM.value\n self.test_project_3.save()\n test_project_4 = Project.clone(self.test_project_2.id, self.test_author.id)\n test_project_4.status = ProjectStatus.PUBLISHED.value\n test_project_4.priority = ProjectPriority.LOW.value\n test_project_4.save()\n\n # Test for descending order\n # Act\n response_desc = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"orderBy\": \"priority\", \"orderByType\": \"DESC\"},\n )\n # Assert\n self.assertEqual(response_desc.status_code, 200)\n self.assertEqual(len(response_desc.json[\"results\"]), 4)\n expected_desc_order = [\n test_project_4.id,\n self.test_project_3.id,\n self.test_project_2.id,\n self.test_project_1.id,\n ]\n self.assertListEqual(\n [i[\"projectId\"] for i in response_desc.json[\"results\"]], expected_desc_order\n )\n\n # Test for ascending order\n # Act\n response_asc = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"orderBy\": \"priority\", \"orderByType\": \"ASC\"},\n )\n # Assert\n self.assertEqual(response_asc.status_code, 200)\n self.assertEqual(len(response_asc.json[\"results\"]), 4)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_asc.json[\"results\"]],\n expected_desc_order[::-1],\n )", "def test_transform_output(argument_pair):\n ...", "def sort_results(self):\n pass", "def test1():\n print(80 * \"#\" + \"\\nTests for generic sorting and binary search.\")\n test1_1()\n test1_2()\n test1_3()\n test1_4()\n test1_5()", "def test1():\n print(80 * \"#\" + \"\\nTests for generic sorting and binary search.\")\n test1_1()\n test1_2()\n test1_3()\n test1_4()\n test1_5()", "def _check_results(self, caller_info, module_info, results, module_ignore_errors, verbosity):\n if module_ignore_errors:\n return\n\n filename, line_number, function_name, lines, index = caller_info\n caller_str = \"{}::{}#{}\".format(filename, function_name, line_number)\n\n if isinstance(self, AnsibleHosts):\n hosts_str = json.dumps(self.hostnames)\n elif isinstance(self, AnsibleHost):\n hosts_str = json.dumps(self.hostnames[0])\n results = results.get(self.hostnames[0], {})\n else:\n raise TypeError(\"Unsupported type of object: {}\".format(type(self)))\n\n if isinstance(module_info, dict):\n module_names = json.dumps(module_info.get(\"module_name\", \"\"))\n hint_str = \"AnsibleModule::{}\".format(module_names)\n elif isinstance(module_info, list):\n module_names = \", \".join([module_item.get(\"module_name\", \"\") for module_item in module_info])\n hint_str = \"AnsibleModules::{}\".format(json.dumps(module_names))\n else:\n raise TypeError(\"Got {}, expected tuple or list of tuples, tuple items: \"\n \"module_name, module_args, module_kwargs, module_attrs\".format(type(module_info)))\n\n err_msg = \"\"\n if verbosity <= 0: # No information of module and result\n err_msg = \"Run ansible module failed\"\n elif verbosity == 1: # Log module name only. 
Do not log args and result\n err_msg = \"{}: {} -> {} failed\".format(\n caller_str,\n hosts_str,\n hint_str\n )\n elif verbosity >= 2: # Log module name, args and result\n if verbosity == 2:\n indent = None\n elif verbosity >= 3:\n indent = 4\n\n err_msg = \"{}: {} -> {} failed, Results => {}\".format(\n caller_str,\n hosts_str,\n hint_str,\n json.dumps(results, indent=indent)\n )\n\n if isinstance(self, AnsibleHosts):\n if isinstance(module_info, dict):\n failed = any([res[\"failed\"] for res in results.values()])\n else:\n failed = any([any([res[\"failed\"] for res in module_results]) for module_results in results.values()])\n elif isinstance(self, AnsibleHost):\n if isinstance(module_info, dict):\n failed = results[\"failed\"]\n else:\n failed = any([res[\"failed\"] for res in results])\n if failed:\n raise RunAnsibleModuleFailed(err_msg)", "def test_package(self):\n pass", "def run_test(self):\n\n # populate *_ps sets\n self.enter_project_file()\n\n # populate *_dir sets\n self.enter_directories()\n\n # The files in the directories makes up the largest possible set of files\n self.result_files = self.result_files_dir\n self.design_files = self.design_files_dir\n self.design_space_files = self.design_space_files_dir\n self.test_bench_files = self.test_bench_files_dir\n\n # populate *_ms sets\n self.enter_meta_results_file()\n\n # populate *_OK sets\n self.check_analysis_status()\n\n df = {'design_files_dir' : list(self.design_files_dir),'design_files_pr' : list(self.design_files_pr),\n 'design_files_ms' : list(self.design_files_ms), 'design_files_OK' : list(self.design_files_OK)}\n\n ds = {'design_space_files_dir' : list(self.design_space_files_dir),\n 'design_space_files_pr' : list(self.design_space_files_pr)}\n\n rs = {'result_files_dir' : list(self.result_files_dir), 'result_files_ms' : list(self.result_files_ms),\n 'result_files_OK' : list(self.result_files_OK)}\n\n tb = {'test_bench_files_dir' : list(self.test_bench_files_dir),\n 'test_bench_files_ms' : list(self.test_bench_files_ms)}\n\n srl = SummaryReportsLinks(self.result_files_dir)\n\n lf = {'files_linked_from_sum_reps' : srl.get_files(),\n 'folders_linked_from_sum_reps' : srl.get_folders()}\n\n # 'test_bench_files_pr' : list(self.test_bench_files_pr),\n \n json_test = {'design_files' : df, 'design_space_files' : ds, 'result_files' : rs,\n 'test_bench_files' : tb, 'stat_files' : self.stat_files,\n 'files_linked_from_sum_reps' : lf}\n\n with open('test_run.json','wb') as f_out:\n json.dump(json_test, f_out, indent=4)", "def module_load_tests(loader, found_tests, pattern):\n\n result = testresources.OptimisingTestSuite()\n found_tests = testscenarios.load_tests_apply_scenarios(\n loader, found_tests, pattern)\n result.addTest(found_tests)\n return result", "def test_run_exec(self):\n from multiprocessing import Process, Queue\n output = Queue()\n repodir = \"~/codes/ci/tests/repo\"\n processes = []\n for i in range(3):\n processes.append(Process(target=run_exec, args=(repodir, \"ls -la\", output, i)))\n processes[-1].start()\n \n #Wait for the unit tests to all finish.\n for p in processes:\n p.join()\n results = [output.get() for p in processes]\n ordered = {o[\"index\"]: o for o in results}\n\n #We consider the test successful if the output files were created and the end time\n #is not None. 
That means that the process ran correctly and python didn't lose\n #control of the subprocess.\n from os import path\n fullrepo = path.expanduser(repodir)\n for i in range(3):\n self.assertTrue(path.isfile(path.join(fullrepo, \"{}.cidat\".format(i))))\n self.assertIsNotNone(ordered[i][\"end\"])\n self.assertEqual(ordered[i][\"code\"], 0)", "def test_compare(self):", "def test_two_ordered():\n run_mergesort([1, 2], [1, 2])", "def pytest_collection_modifyitems(config, items):\n # check if studio tests myst be skipped\n run_study = config.getoption(\"--runstudy\")\n # 'all' will match all studies, '' will not match anything\n run_study = {'': '(?!x)x', 'all': '.*'}.get(run_study, run_study)\n # --runstudy given in cli: do not skip study tests and\n test_selected = list()\n test_skipped = list()\n groups = dict()\n incremental = pytest.mark.incremental()\n\n def add():\n \"helper for gathering test info\"\n marker = item.get_marker(mark)\n kwargs = parse_args(marker.args, marker.kwargs)\n group_name = kwargs['name']\n group = groups.setdefault(group_name, dict())\n group.setdefault(mark, list()).append((kwargs, item))\n item.add_marker(incremental)\n\n # place every test in regular, prerequisite and studies\n # group by name\n for item in items:\n for mark in set(item.keywords.keys()).intersection(MARKS):\n add()\n break\n else:\n test_selected.append(item)\n\n def sort(a, b):\n \"Sort two items by order priority\"\n return cmp(a[0]['order'], b[0]['order'])\n\n # use studies precedence to built the global sequence order\n mandatory = 'study' # mandatory mark for global sorting: study\n studies = list()\n for name, info in groups.items():\n studies.extend(info.get(mandatory, []))\n studies.sort(sort)\n\n def append(tests, where):\n \"helper to add the test item from info structure\"\n for test in tests:\n test = test[1]\n if test not in where:\n where.append(test)\n\n # select only the test that are going to be launched\n width = 0\n regexp = re.compile(run_study, re.I | re.DOTALL)\n for study in studies:\n group_name = study[0]['name']\n width = max(width, len(group_name))\n where = test_selected if regexp.search(group_name) else test_skipped\n for mark, seq in groups[group_name].items():\n if mark == mandatory:\n continue\n seq.sort(sort)\n append(seq, where)\n append([study], where)\n\n if config.getoption(\"--show_order\") or config.getoption(\"--debug\"):\n fmt = \"{0:>3d} [{1:>%s}] {2}\" % width\n for i, item in enumerate(test_selected + test_skipped):\n study = get_study_name(item)\n fqn = get_FQN(item)\n line = fmt.format(i, study, fqn)\n if item in test_selected:\n line = term.green('+' + line)\n else:\n line = term.yellow('-' + line)\n print(line)\n\n # we make the --runstudy check at the end to be able to show\n # test order with --show_order or --debig options\n # reorder tests by group name and replace items IN-PLACE\n if run_study:\n items[:] = test_selected\n return\n\n skip_test = pytest.mark.skip(reason=\"need --runstudy option to run\")\n for item in items:\n if set(item.keywords.keys()).intersection(MARKS):\n item.add_marker(skip_test)", "def execute_testsets(testsets):\n group_results = dict() #results, by group\n group_failure_counts = dict()\n total_failures = 0\n myinteractive = False\n\n for testset in testsets:\n mytests = testset.tests\n myconfig = testset.config\n mybenchmarks = testset.benchmarks\n\n #Make sure we actually have tests to execute\n if not mytests and not mybenchmarks:\n # no tests in this test set, probably just imports.. 
skip to next test set\n break\n\n myinteractive = True if myinteractive or myconfig.interactive else False\n\n #Run tests, collecting statistics as needed\n for test in mytests:\n #Initialize the dictionaries to store test fail counts and results\n if test.group not in group_results:\n group_results[test.group] = list()\n group_failure_counts[test.group] = 0\n\n result = run_test(test, test_config = myconfig)\n result.body = None # Remove the body, save some memory!\n\n if not result.passed: #Print failure, increase failure counts for that test group\n logging.error('Test Failed: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group+\" HTTP Status Code: \"+str(result.response_code))\n\n if test.validators is not None:\n for validator in test.validators:\n if validator.passed == False:\n logging.warning(\" Validation Failed: \" + str(validator))\n\n #Increment test failure counts for that group (adding an entry if not present)\n failures = group_failure_counts[test.group]\n failures = failures + 1\n group_failure_counts[test.group] = failures\n\n else: #Test passed, print results\n logging.info('Test Succeeded: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group)\n\n #Add results for this test group to the resultset\n group_results[test.group].append(result)\n\n # handle stop_on_failure flag\n if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:\n print 'STOP ON FAILURE! stopping test set execution, continuing with other test sets'\n break\n\n for benchmark in mybenchmarks: # Run benchmarks, analyze, write\n if not benchmark.metrics:\n logging.debug('Skipping benchmark, no metrics to collect')\n continue\n\n logging.info(\"Benchmark Starting: \"+benchmark.name+\" Group: \"+benchmark.group)\n curl = configure_curl(benchmark, myconfig)\n benchmark_result = run_benchmark(curl, benchmark, myconfig)\n print benchmark_result\n logging.info(\"Benchmark Done: \"+benchmark.name+\" Group: \"+benchmark.group)\n\n if benchmark.output_file: # Write file\n write_method = OUTPUT_METHODS[benchmark.output_format]\n my_file = open(benchmark.output_file, 'w') # Overwrites file\n logging.debug(\"Benchmark writing to file: \" + benchmark.output_file)\n write_method(my_file, benchmark_result, benchmark, test_config = myconfig)\n my_file.close()\n\n if myinteractive:\n # a break for when interactive bits are complete, before summary data\n print \"===================================\"\n\n #Print summary results\n for group in sorted(group_results.keys()):\n test_count = len(group_results[group])\n failures = group_failure_counts[group]\n total_failures = total_failures + failures\n if (failures > 0):\n print u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n else:\n print u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n\n return total_failures", "def test_customize_test_loads(self):\n self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.tester)\n self.create_forktest(\"own-fork-commit\", TestPlatform.linux, regression_tests=[2])\n self.create_completed_regression_t_entries(3, [2])\n response = self.app.test_client().get('/test/3')\n self.assertEqual(response.status_code, 200)\n self.assert_template_used('test/by_id.html')\n regression_tests = RegressionTest.query.all()\n self.assertIn(regression_tests[1].command, str(response.data))\n self.assertNotIn(regression_tests[0].command, str(response.data))", "def 
test_returns_sorted_projects_by_difficulty_if_sort_by_set_to_difficulty(self):\n # Arrange\n # Set difficulty of test_project_1 to easy.\n self.test_project_1.difficulty = ProjectDifficulty.EASY.value\n self.test_project_1.save()\n # Set project_2 to be allowed for all users removing as private.\n self.test_project_2.private = False\n self.test_project_2.difficulty = ProjectDifficulty.MODERATE.value\n self.test_project_2.save()\n # Set difficulty of test_project_1 to hard and status to published.\n self.test_project_3.status = ProjectStatus.PUBLISHED.value\n self.test_project_3.difficulty = ProjectDifficulty.CHALLENGING.value\n self.test_project_3.save()\n\n # Test for descending order\n # Act\n response_desc = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"orderBy\": \"difficulty\", \"orderByType\": \"DESC\"},\n )\n # Assert\n self.assertEqual(response_desc.status_code, 200)\n self.assertEqual(len(response_desc.json[\"results\"]), 3)\n expected_desc_order = [\n self.test_project_3.id,\n self.test_project_2.id,\n self.test_project_1.id,\n ]\n self.assertListEqual(\n [i[\"projectId\"] for i in response_desc.json[\"results\"]], expected_desc_order\n )\n\n # Test for ascending order\n # Act\n response_asc = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"orderBy\": \"difficulty\", \"orderByType\": \"ASC\"},\n )\n # Assert\n self.assertEqual(response_asc.status_code, 200)\n self.assertEqual(len(response_asc.json[\"results\"]), 3)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_asc.json[\"results\"]],\n expected_desc_order[::-1],\n )", "def main():\n zipper_two_lists_tests()\n zipper_two_tuples_tests()\n zipper_list_and_tuple_tests()", "def test_importer_returns_tests():\n flowtask = FlowTaskFactory()\n flowtask.build_flow.build.org = OrgFactory()\n with temporary_dir() as output_dir:\n copyfile(\n TEST_ROBOT_OUTPUT_FILES / \"robot_with_failures.xml\",\n Path(output_dir) / \"output.xml\",\n )\n actual = robot_importer.import_robot_test_results(flowtask, output_dir)\n expected = [\n {\n \"name\": \"Passing test\",\n \"group\": \"Robot Fail\",\n \"status\": \"Pass\",\n \"start_time\": \"2020-06-23T18:49:20.955000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.956000+00:00\",\n \"exception\": \"Life is good, yo.\",\n \"doc\": \"\",\n \"tags\": [\"tag one\", \"tag two\"],\n },\n {\n \"name\": \"Failing test 1\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:20.957000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.960000+00:00\",\n \"exception\": \"Danger, Will Robinson!\",\n \"doc\": \"A test that fails with a keyword directly in the test\",\n \"tags\": [],\n },\n {\n \"name\": \"Failing test 2\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:20.960000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.963000+00:00\",\n \"doc\": \"A test that fails due to a failure in a lower level keyword.\",\n \"exception\": \"I'm sorry, Dave. 
I'm afraid I can't do that.\",\n \"tags\": [],\n },\n {\n \"name\": \"Failing test 3\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:21.017000+00:00\",\n \"end_time\": \"2020-06-23T18:49:21.024000+00:00\",\n \"exception\": (\n \"Several failures occurred:\\n\\n\"\n \" 1) First failure\\n\\n\"\n \" 2) Second failure\"\n ),\n \"doc\": \"A test that has multiple keyword failures\",\n \"tags\": [],\n },\n ]\n assert actual == expected", "def unitary_test():", "def test_ordering(self):\n # \"Album 1\" and \"Album 3\" are regular albums\n # \"Album 2\" is V/A\n # The remaining track will create a non-album track.\n self.add_mp3(artist='Artist 1', title='Title 1',\n album='Album 1', filename='song1.mp3', path='album_1')\n self.add_mp3(artist='Artist 1', title='Title 2',\n album='Album 1', filename='song2.mp3', path='album_1')\n self.add_mp3(artist='Artist 1', title='Title 3',\n album='Album 2', filename='song3.mp3', path='album_2')\n self.add_mp3(artist='Artist 2', title='Title 4',\n album='Album 2', filename='song4.mp3', path='album_2')\n self.add_mp3(artist='Artist 1', title='Title 5',\n album='Album 3', filename='song5.mp3', path='album_3')\n self.add_mp3(artist='Artist 1', title='Title 6',\n album='Album 3', filename='song6.mp3', path='album_3')\n self.add_mp3(artist='Artist 1', title='Title 7',\n filename='song7.mp3')\n self.run_add()\n\n artist = Artist.objects.get(name='Artist 1')\n\n self.assertEqual(Album.objects.count(), 4)\n reg_album_1 = Album.objects.get(name='Album 1')\n reg_album_2 = Album.objects.get(name='Album 3')\n va_album = Album.objects.get(name='Album 2')\n misc_album = Album.objects.get(miscellaneous=True)\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, '4 albums')\n self.assertContains(response, '6 songs')\n self.assertQuerysetEqual(response.context['albums'].data,\n [repr(al) for al in [reg_album_1, reg_album_2, misc_album, va_album]])\n self.assertQuerysetEqual(response.context['songs'].data,\n [repr(s) for s in Song.objects.filter(artist=artist).order_by('title')])\n\n # There are certainly some duplicate tests happening down here.\n for album in [reg_album_1, reg_album_2, misc_album, va_album]:\n self.assertContains(response, str(album))\n self.assertContains(response, str(album.artist))\n self.assertContains(response, reverse('exordium:album', args=(album.pk,)))\n self.assertContains(response, reverse('exordium:artist', args=(album.artist.normname,)))\n for song in Song.objects.filter(artist=artist):\n self.assertContains(response, str(song.title))\n self.assertContains(response, song.get_download_url_html5())\n self.assertContains(response, song.get_download_url_m3u())\n for song in Song.objects.exclude(artist=artist):\n self.assertNotContains(response, str(song.title))\n self.assertNotContains(response, song.get_download_url_html5())\n self.assertNotContains(response, song.get_download_url_m3u())", "def pytest_before_group_items(session, config, items):", "def process_test_start(self, config, results, result_id, db):\n pass", "def test_import_allows_multiple_modules_successful(self):\n # Deliberately using modules that will already be imported to avoid side effects.\n feature = LazyImportTester([\"site\", \"sys\"])\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n self.assertTrue(feature)\n check.assert_called_once()", "def test_patch_group(self):\n pass", "def 
test_patch_group(self):\n pass", "def test_three_split():\n run_mergesort([3, 0, 3], [0, 3, 3])" ]
[ "0.8273559", "0.71171457", "0.59634614", "0.59479153", "0.58932537", "0.5843216", "0.57562256", "0.57490045", "0.57445383", "0.5740733", "0.57388484", "0.5723813", "0.57204384", "0.5707784", "0.5704445", "0.57039195", "0.56746626", "0.56736004", "0.5648056", "0.56323606", "0.5616576", "0.5611849", "0.56104773", "0.5606904", "0.560013", "0.55976397", "0.55842614", "0.55831844", "0.555388", "0.5548459", "0.55381584", "0.5535436", "0.5515833", "0.5515152", "0.55101395", "0.5497748", "0.5487534", "0.54817176", "0.54762083", "0.54671514", "0.5460435", "0.5460435", "0.5447573", "0.54438627", "0.5438037", "0.5434203", "0.542144", "0.5419108", "0.5416339", "0.5401147", "0.5393013", "0.5383019", "0.5373136", "0.53589064", "0.5356354", "0.5356354", "0.5356354", "0.5356354", "0.5356354", "0.53513056", "0.5350829", "0.534859", "0.5345729", "0.5342144", "0.5340934", "0.53395426", "0.5339254", "0.5339254", "0.5329661", "0.5329661", "0.5320765", "0.5320765", "0.53141683", "0.5314084", "0.53124297", "0.53071064", "0.5305325", "0.53048706", "0.53048706", "0.53011745", "0.5293106", "0.52917933", "0.52892274", "0.52879965", "0.5287401", "0.52837276", "0.5282286", "0.52776456", "0.52711445", "0.526826", "0.526642", "0.5260566", "0.52509767", "0.5247107", "0.524676", "0.52431554", "0.5237368", "0.5236813", "0.5236813", "0.5228993" ]
0.8312768
0
Test the _arrange_test_result method with multiple runners.
def test_arrange_test_result_multi_runner(self): runner_a_pass_1 = self._create_test_result(runner_name='runner_a', status=test_runner_base.PASSED_STATUS) runner_a_pass_2 = self._create_test_result(runner_name='runner_a', status=test_runner_base.PASSED_STATUS) runner_a_pass_3 = self._create_test_result(runner_name='runner_a', status=test_runner_base.PASSED_STATUS) runner_b_fail_1 = self._create_test_result(runner_name='runner_b', status=test_runner_base.FAILED_STATUS) runner_b_fail_2 = self._create_test_result(runner_name='runner_b', status=test_runner_base.FAILED_STATUS) runner_b_ignore_1 = self._create_test_result(runner_name='runner_b', status=test_runner_base.IGNORED_STATUS) reporter_1 = result_reporter.ResultReporter() reporter_1.all_test_results.extend([runner_a_pass_1, runner_a_pass_2, runner_a_pass_3]) reporter_2 = result_reporter.ResultReporter() reporter_2.all_test_results.extend([runner_b_fail_1, runner_b_fail_2, runner_b_ignore_1]) info_dict = {} aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2]) expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0, aei._STATUS_FAILED_KEY : 0, aei._STATUS_PASSED_KEY : 3} self.assertEqual( expect_group_a_summary, info_dict[aei._TEST_RUNNER_KEY]['runner_a']['someModule'][aei._SUMMARY_KEY]) expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 1, aei._STATUS_FAILED_KEY : 2, aei._STATUS_PASSED_KEY : 0} self.assertEqual( expect_group_b_summary, info_dict[aei._TEST_RUNNER_KEY]['runner_b']['someModule'][aei._SUMMARY_KEY]) expect_total_summary = {aei._STATUS_IGNORED_KEY : 1, aei._STATUS_FAILED_KEY : 2, aei._STATUS_PASSED_KEY : 3} self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_arrange_test_result_multi_module(self):\n group_a_pass_1 = self._create_test_result(group_name='grpup_a',\n status=test_runner_base.PASSED_STATUS)\n group_b_pass_1 = self._create_test_result(group_name='grpup_b',\n status=test_runner_base.PASSED_STATUS)\n group_c_pass_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.PASSED_STATUS)\n group_b_fail_1 = self._create_test_result(group_name='grpup_b',\n status=test_runner_base.FAILED_STATUS)\n group_c_fail_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.FAILED_STATUS)\n group_c_ignore_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([group_a_pass_1, group_b_pass_1, group_c_pass_1])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([group_b_fail_1, group_c_fail_1, group_c_ignore_1])\n\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 0,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_a_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_a'][aei._SUMMARY_KEY])\n\n expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 1,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_b_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_b'][aei._SUMMARY_KEY])\n\n expect_group_c_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 1,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_c_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_c'][aei._SUMMARY_KEY])\n\n expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])", "def test_arrange_test_result_one_module(self):\n pass_1 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_2 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_3 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n fail_1 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n fail_2 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n ignore_1 = self._create_test_result(status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([pass_1, pass_2, pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([fail_1, fail_2, ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_summary, info_dict[aei._TOTAL_SUMMARY_KEY])", "def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def execute_testsets(testsets):\n group_results = dict() #results, by group\n group_failure_counts = dict()\n total_failures = 0\n myinteractive = False\n\n for testset in testsets:\n mytests = testset.tests\n myconfig = testset.config\n mybenchmarks = testset.benchmarks\n\n #Make sure we actually have tests to execute\n if not mytests and not 
mybenchmarks:\n # no tests in this test set, probably just imports.. skip to next test set\n break\n\n myinteractive = True if myinteractive or myconfig.interactive else False\n\n #Run tests, collecting statistics as needed\n for test in mytests:\n #Initialize the dictionaries to store test fail counts and results\n if test.group not in group_results:\n group_results[test.group] = list()\n group_failure_counts[test.group] = 0\n\n result = run_test(test, test_config = myconfig)\n result.body = None # Remove the body, save some memory!\n\n if not result.passed: #Print failure, increase failure counts for that test group\n logging.error('Test Failed: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group+\" HTTP Status Code: \"+str(result.response_code))\n\n if test.validators is not None:\n for validator in test.validators:\n if validator.passed == False:\n logging.warning(\" Validation Failed: \" + str(validator))\n\n #Increment test failure counts for that group (adding an entry if not present)\n failures = group_failure_counts[test.group]\n failures = failures + 1\n group_failure_counts[test.group] = failures\n\n else: #Test passed, print results\n logging.info('Test Succeeded: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group)\n\n #Add results for this test group to the resultset\n group_results[test.group].append(result)\n\n # handle stop_on_failure flag\n if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:\n print 'STOP ON FAILURE! stopping test set execution, continuing with other test sets'\n break\n\n for benchmark in mybenchmarks: # Run benchmarks, analyze, write\n if not benchmark.metrics:\n logging.debug('Skipping benchmark, no metrics to collect')\n continue\n\n logging.info(\"Benchmark Starting: \"+benchmark.name+\" Group: \"+benchmark.group)\n curl = configure_curl(benchmark, myconfig)\n benchmark_result = run_benchmark(curl, benchmark, myconfig)\n print benchmark_result\n logging.info(\"Benchmark Done: \"+benchmark.name+\" Group: \"+benchmark.group)\n\n if benchmark.output_file: # Write file\n write_method = OUTPUT_METHODS[benchmark.output_format]\n my_file = open(benchmark.output_file, 'w') # Overwrites file\n logging.debug(\"Benchmark writing to file: \" + benchmark.output_file)\n write_method(my_file, benchmark_result, benchmark, test_config = myconfig)\n my_file.close()\n\n if myinteractive:\n # a break for when interactive bits are complete, before summary data\n print \"===================================\"\n\n #Print summary results\n for group in sorted(group_results.keys()):\n test_count = len(group_results[group])\n failures = group_failure_counts[group]\n total_failures = total_failures + failures\n if (failures > 0):\n print u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n else:\n print u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n\n return total_failures", "def after_test(self, test_results):\n pass", "def main():\n test_merge_quick_sort()\n test_compare()", "def run_combined(self):\n self.runtest_autokey()\n self.runtest_mediaresource()\n self.runtest_composite_slug()\n self.runtest_all_types()\n self.runtest_complex_types()\n self.runtest_only_key()\n self.runtest_compound_key()\n self.runtest_simple_select()\n self.runtest_paging()\n self.runtest_nav_o2o()\n self.runtest_nav_o2o_1()\n self.runtest_nav_zo2o()\n self.runtest_nav_zo2o_f()\n self.runtest_nav_zo2o_b()\n self.runtest_nav_many2o()\n 
self.runtest_nav_many2o_f()\n self.runtest_nav_many2o_b()\n self.runtest_nav_many2zo()\n self.runtest_nav_many2zo_f()\n self.runtest_nav_many2zo_b()\n self.runtest_nav_many2zo_r()\n self.runtest_nav_many2zo_rf()\n self.runtest_nav_many2zo_rb()\n self.runtest_nav_many2many()\n self.runtest_nav_many2many_1()\n self.runtest_nav_many2many_r()\n self.runtest_nav_many2many_r1()", "def test_get_results(self):\n pass", "def getTestResults():", "def test_result_group_can_be_sorted_by_other_metrics(\n self, result_group_roc: ResultGroup, result_1: Result, result_2: Result\n ):\n assert result_group_roc.results == [result_1, result_2]", "def runTests(self):\n \n pass", "def test_list_runs(self):\n pass", "def _handler_test_run_tests(self, *args, **kwargs):\n next_state = None\n result = None\n\n tc_pass = False\n tt_pass = False\n tp_pass = False\n tc_result = None\n tt_result = None\n tp_result = None\n\n test_result = {}\n\n try:\n tc_pass, tc_result = self._do_cmd_resp('tc', timeout=200)\n tt_pass, tt_result = self._do_cmd_resp('tt', timeout=200)\n tp_pass, tp_result = self._do_cmd_resp('tp', timeout=200)\n \n except Exception as e:\n test_result['exception'] = e\n test_result['message'] = 'Error running instrument tests.'\n \n finally:\n test_result['cond_test'] = 'Passed' if tc_pass else 'Failed'\n test_result['cond_data'] = tc_result\n test_result['temp_test'] = 'Passed' if tt_pass else 'Failed'\n test_result['temp_data'] = tt_result\n test_result['pres_test'] = 'Passed' if tp_pass else 'Failed'\n test_result['pres_data'] = tp_result\n test_result['success'] = 'Passed' if (tc_pass and tt_pass and tp_pass) else 'Failed'\n \n self._driver_event(DriverAsyncEvent.TEST_RESULT, test_result)\n next_state = SBE37ProtocolState.COMMAND\n \n return (next_state, result)", "def run_stage_loop(cls, _opts, tests_results, put_next_stage):\n for _, result in tests_results:\n put_next_stage(result)", "def runtest(self):", "def do_test():\n for x in execute_helper(test_info,crossmap_tests):\n yield x", "def test_one():\n run_mergesort([1], [1])", "def test_batch(self):\n pass", "def test_multiple_commands_at_same_time(self):", "def test_basic_execution(self):", "def test_something(self):\n\n allure.dynamic.title(\"Testing compute_ranks\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>Test the function taht organizes a sports league in a \"\n \"round-robin-system. Each team meets all other teams. \"\n \"In your league a win gives a team 2 points, a draw gives \"\n \"both teams 1 point. After some games you have to compute \"\n \"the order of the teams in your league. 
You use the following \"\n \"criteria to arrange the teams:</p>\"\n \"<ul><li>- Points</li>\"\n \"<li>- Scoring differential (the difference between goals \"\n \"scored and those conceded)</li>\"\n \"<li>- Goals scored</li></ul>\")\n\n test_data = [\n (6,\n [[0, 5, 2, 2],\n [1, 4, 0, 2],\n [2, 3, 1, 2],\n [1, 5, 2, 2],\n [2, 0, 1, 1],\n [3, 4, 1, 1],\n [2, 5, 0, 2],\n [3, 1, 1, 1],\n [4, 0, 2, 0]],\n [4, 4, 6, 3, 1, 2]),\n (6,\n [[0, 5, 2, 0],\n [1, 4, 2, 2],\n [2, 3, 1, 3],\n [1, 5, 0, 0],\n [2, 0, 2, 1],\n [3, 4, 3, 1]],\n [2, 3, 4, 1, 5, 6]),\n (4,\n [[0, 3, 1, 1],\n [1, 2, 2, 2],\n [1, 3, 2, 0],\n [2, 0, 2, 0]],\n [3, 1, 1, 3]),\n (10,\n [],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),\n (8,\n [[0, 7, 2, 0]],\n [1, 2, 2, 2, 2, 2, 2, 8])\n ]\n\n for data in test_data:\n number = data[0]\n games = data[1]\n expected = data[2]\n actual_result = compute_ranks(number, games)\n print_log(number=number,\n games=games,\n expected=expected,\n actual_result=actual_result)\n\n with allure.step(\"Enter a test data and verify the result:\"):\n self.assertEqual(expected, actual_result)", "def test_by_order(self):\n # addon_executor = AddonExecutor(execute_order, stop_order)\n # self.assertEqual(expected, addon_executor.execute_with_order(addon, execute_order, stop_order))\n self.run_mgr.by_order(self.cli_inst, ['execute', 'start'], ['stop'])\n output = self._get_lines_as_list(sys.stdout)\n\n self.assertTrue(output[0].startswith('Execute'))\n self.assertTrue(output[1].startswith('Start'))\n self.assertTrue(output[2].startswith('Stop'))", "def test_result_order(env):\n timeouts = list(reversed([env.timeout(delay) for delay in range(3)]))\n\n def p(env, timeouts):\n results = yield env.all_of(timeouts)\n assert list(results.keys()) == timeouts\n\n env.process(p(env, timeouts))\n env.run()", "def __execute_tests(self, lst_tests):\n tests_pass = tests_fail = 0\n queue_of_result = multiprocessing.Queue()\n for test in lst_tests:\n process = multiprocessing.Process(\n target=TestRunner.__helper_execute_test,\n kwargs={\"test_cls\": test,\n \"time_out\": self.__args.timeout,\n \"channel\": queue_of_result})\n process.start()\n process.join()\n temp_result = {}\n if not queue_of_result.empty():\n temp_result = queue_of_result.get_nowait()\n\n if \"status\" in temp_result:\n if temp_result[\"status\"] == result.Status.PASSED:\n tests_pass += 1\n else:\n tests_fail += 1\n\n if \"json_path\" in temp_result:\n self.__lst_json_files.append(temp_result[\"json_path\"])\n\n if \"log_path\" in temp_result:\n self.__lst_log_files.append(temp_result[\"log_path\"])\n\n return tests_pass, tests_fail", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "def test_pyt_multitask(self):\n\n def run_display_test(defaults, ep_and_ex_counts):\n with testing_utils.capture_output() as f:\n parser = display_setup_args()\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n display_data(opt)\n str_output = f.getvalue()\n self.assertTrue(\n '[ loaded {} episodes with a total of {} examples ]'.format(\n ep_and_ex_counts[0], ep_and_ex_counts[1]\n ) in str_output,\n 'PytorchDataTeacher multitasking failed with '\n 'following args: {}'.format(opt)\n )\n\n task1 = 'babi:task1k:1'\n task2 = 'babi:task1k:2'\n dataset1 = 'flickr30k'\n dataset2 = 'vqa_v1'\n\n # Expected example and episode counts\n eps_and_exs_counts = [\n (1800, 1800),\n (1080, 1800),\n (29900, 29900),\n (29180, 29900),\n (277349, 277349)\n ]\n defaults = parser_defaults.copy()\n\n # 1.\n defaults['pytorch_teacher_task'] = 
'{},{}'.format(task1, task2)\n run_display_test(defaults, eps_and_exs_counts[0])\n\n # 2.\n defaults['pytorch_teacher_task'] = task1\n defaults['task'] = task2\n run_display_test(defaults, eps_and_exs_counts[1])\n\n # 3.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = dataset1\n run_display_test(defaults, eps_and_exs_counts[2])\n\n # 4.\n del defaults['pytorch_teacher_task']\n defaults['task'] = task1\n run_display_test(defaults, eps_and_exs_counts[3])\n\n # 5.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = '{},{}'.format(dataset1, dataset2)\n run_display_test(defaults, eps_and_exs_counts[4])", "def run(cls): \n tests_to_run = cls.config.TESTS # A list of 5-tuple elements specifying the tests to run. See the\n # 'Test Setup' section in config.py.template for more info.\n test_group_name = \"Alchemist Tests\" # A short string identifier for this test run.\n output_dir = cls.config.OUTPUT_DIR # The output file where we write results.\n \n try:\n os.makedirs(output_dir,0o777)\n except:\n pass\n num_tests_to_run = len(tests_to_run)\n\n print(OUTPUT_DIVIDER_STRING)\n if num_tests_to_run == 1:\n print(\"Running %d test in %s\" % (num_tests_to_run, test_group_name))\n else:\n print(\"Running %d tests in %s\" % (num_tests_to_run, test_group_name))\n failed_tests = []\n\n cls.before_run_tests()\n \n spark_settings = []\n for i in cls.config.SPARK_SETTINGS:\n spark_settings.append(i.to_array()[0])\n \n output_settings = []\n for i in cls.config.OUTPUT_SETTINGS:\n output_settings.append(i.to_array()[0])\n \n main_class = \"altest.AlTest\"\n\n for meta_data, opt_sets in tests_to_run:\n print(OUTPUT_DIVIDER_STRING + '\\n')\n# print(\"Running test command: '%s' ... \" % main_class)\n \n meta = {}\n meta_pairs = [i.to_tuple() for i in meta_data]\n for mp in meta_pairs:\n meta[mp[0].replace('-', '_')] = mp[1].replace('0x20', ' ')\n \n meta_settings = []\n for i in meta_data:\n meta_settings.append(i.to_array()[0])\n \n# stdout_filename = \"%s/%s.out\" % (output_dir, meta['short_name'])\n# stderr_filename = \"%s/%s.err\" % (output_dir, meta['short_name'])\n# \n# out_file = open(output_dir + \"/\" + meta['short_name'] + \".out\", 'w')\n\n # Run a test for all combinations of the OptionSets given, then capture\n # and print the output.\n opt_set_arrays = [i.to_array() for i in opt_sets]\n for opt_list in itertools.product(*opt_set_arrays):\n\n cmd = cls.get_spark_submit_cmd(spark_settings, main_class, output_settings, meta_settings, opt_list)\n# print(\"\\nSetting env var SPARK_SUBMIT_OPTS: %s\" % java_opts_str)\n# test_env[\"SPARK_SUBMIT_OPTS\"] = java_opts_str\n print(\"Running command:\")\n print(\"%s\\n\" % cmd)\n Popen(cmd, shell=True, env=test_env).wait()\n\n try:\n src = output_dir + meta['short_name'] + '_latest/'\n src_files = os.listdir(src)\n src_file = src_files[0][:-4]\n new_dir = output_dir + src_file\n os.makedirs(new_dir)\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if (os.path.isfile(full_file_name)):\n shutil.copy(full_file_name, new_dir)\n except:\n pass\n \n# result_string = cls.process_output(config, meta['short_name'], opt_list,\n# stdout_filename, stderr_filename)\n# print(OUTPUT_DIVIDER_STRING)\n# print(\"\\nResult: \" + result_string)\n# print(OUTPUT_DIVIDER_STRING)\n# if \"FAILED\" in result_string:\n# failed_tests.append(meta['short_name'])\n# \n# \n# out_file.write(result_string + \"\\n\")\n# out_file.flush()\n\n if num_tests_to_run == 1:\n print(\"Finished running %d test in %s.\" % (num_tests_to_run, 
test_group_name))\n else:\n print(\"Finished running %d tests in %s.\" % (num_tests_to_run, test_group_name))\n# print(\"\\nNumber of failed tests: %d, failed tests: %s\" %\n# (len(failed_tests), \",\".join(failed_tests)))\n print(OUTPUT_DIVIDER_STRING)", "def test_insertSort3(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_3[0]),self.test_3[1])", "def test_run_exec(self):\n from multiprocessing import Process, Queue\n output = Queue()\n repodir = \"~/codes/ci/tests/repo\"\n processes = []\n for i in range(3):\n processes.append(Process(target=run_exec, args=(repodir, \"ls -la\", output, i)))\n processes[-1].start()\n \n #Wait for the unit tests to all finish.\n for p in processes:\n p.join()\n results = [output.get() for p in processes]\n ordered = {o[\"index\"]: o for o in results}\n\n #We consider the test successful if the output files were created and the end time\n #is not None. That means that the process ran correctly and python didn't lose\n #control of the subprocess.\n from os import path\n fullrepo = path.expanduser(repodir)\n for i in range(3):\n self.assertTrue(path.isfile(path.join(fullrepo, \"{}.cidat\".format(i))))\n self.assertIsNotNone(ordered[i][\"end\"])\n self.assertEqual(ordered[i][\"code\"], 0)", "def test_3():", "def sort_results(self):\n pass", "def __call__(self, result=None):\n self._pre_setup()\n super(TestCase, self).__call__(result)\n self._post_tearDown()", "def setUp(self):\n self.splits = (2,3,4)", "def _super_run_modified(self, result=None):\n\n orig_result = result\n if result is None:\n result = self.defaultTestResult()\n startTestRun = getattr(result, 'startTestRun', None)\n if startTestRun is not None:\n startTestRun()\n\n result.startTest(self)\n\n testMethod = getattr(self, self._testMethodName)\n if (getattr(self.__class__, \"__unittest_skip__\", False) or\n getattr(testMethod, \"__unittest_skip__\", False)):\n # If the class or method was skipped.\n try:\n skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')\n or getattr(testMethod, '__unittest_skip_why__', ''))\n self._addSkip(result, self, skip_why)\n finally:\n result.stopTest(self)\n return\n expecting_failure_method = getattr(testMethod,\n \"__unittest_expecting_failure__\", False)\n expecting_failure_class = getattr(self,\n \"__unittest_expecting_failure__\", False)\n expecting_failure = expecting_failure_class or expecting_failure_method\n outcome = Outcome(result)\n try:\n self._outcome = outcome\n\n with outcome.testPartExecutor(self):\n self.setUp()\n if outcome.success:\n outcome.expecting_failure = expecting_failure\n with outcome.testPartExecutor(self, isTest=True):\n testMethod()\n\n # 当前用例失败时触发on_errors回调\n if not outcome.success:\n with outcome.testPartExecutor(self):\n self.on_errors(outcome.errors)\n\n outcome.expecting_failure = False\n with outcome.testPartExecutor(self):\n self.tearDown()\n\n self.doCleanups()\n for test, reason in outcome.skipped:\n self._addSkip(result, test, reason)\n self._feedErrorsToResult(result, outcome.errors)\n if outcome.success:\n if expecting_failure:\n if outcome.expectedFailure:\n self._addExpectedFailure(result, outcome.expectedFailure)\n else:\n self._addUnexpectedSuccess(result)\n else:\n result.addSuccess(self)\n return result\n finally:\n result.stopTest(self)\n if orig_result is None:\n stopTestRun = getattr(result, 'stopTestRun', None)\n if stopTestRun is not None:\n stopTestRun()\n\n # explicitly break reference cycles:\n # outcome.errors -> frame -> outcome -> outcome.errors\n # 
outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure\n del outcome.errors[:] # equivalent to [].clear in py3\n outcome.expectedFailure = None\n\n # clear the outcome, no more needed\n self._outcome = None", "def process_test_start(self, config, results, result_id, db):\n pass", "def test_concurrent_test_runs(self):\n num_passing_tests = 20\n num_failing_tests = 20\n num_error_tests = 20\n total_num_tests = num_passing_tests + num_failing_tests + num_error_tests\n\n times = [0] + [i for i in range(2 * total_num_tests)\n ] + [2 * total_num_tests - 1]\n result = self._make_result(times)\n threads = []\n names = []\n result.startTestRun()\n for i in range(num_passing_tests):\n name = 'passing_concurrent_test_%s' % i\n names.append(name)\n test_name = '__main__.MockTest.%s' % name\n # xml_reporter uses id(test) as the test identifier.\n # In a real testing scenario, all the test instances are created before\n # running them. So all ids will be unique.\n # We must do the same here: create test instance beforehand.\n test = MockTest(test_name)\n threads.append(threading.Thread(\n target=self._simulate_passing_test, args=(test, result)))\n for i in range(num_failing_tests):\n name = 'failing_concurrent_test_%s' % i\n names.append(name)\n test_name = '__main__.MockTest.%s' % name\n test = MockTest(test_name)\n threads.append(threading.Thread(\n target=self._simulate_failing_test, args=(test, result)))\n for i in range(num_error_tests):\n name = 'error_concurrent_test_%s' % i\n names.append(name)\n test_name = '__main__.MockTest.%s' % name\n test = MockTest(test_name)\n threads.append(threading.Thread(\n target=self._simulate_error_test, args=(test, result)))\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n result.stopTestRun()\n result.printErrors()\n tests_not_in_xml = []\n for tn in names:\n if tn not in self.xml_stream.getvalue():\n tests_not_in_xml.append(tn)\n msg = ('Expected xml_stream to contain all test %s results, but %s tests '\n 'are missing. List of missing tests: %s' % (\n total_num_tests, len(tests_not_in_xml), tests_not_in_xml))\n self.assertEqual([], tests_not_in_xml, msg)", "def run_tests():\n def print_result(result, correct):\n if result == correct:\n print(\" OK!\")\n else:\n print(f\" Failed ({result} != {correct})!\")\n for n, test in enumerate(_tests, start=1):\n print(f\"Running test {n}...\")\n nums = line2ints(test[\"in\"])\n try:\n correct = test[\"part1\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 1...\", end=\"\")\n result = part1(nums, steps=test.get(\"phases1\", 100))\n print_result(result, correct)\n try:\n correct = test[\"part2\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 2...\", end=\"\")\n result = part2(nums, steps=test.get(\"phases2\", 100))\n print_result(result, correct)", "def test_run_multi_r__(self):\n\n # Test Description\n # ================\n #\n # 1. This test intialises an example *eopy.matchup.matchupIO.MatchUp* object\n #\n # 2. Compare transformed dataset to expected value\n\n ################################################################################################################\n # 1. Initialise Test Data Object\n ################################################################################################################\n\n MatchUpTest = return_MatchUpTest_r__()\n\n ################################################################################################################\n # 2. 
Define expected values\n ################################################################################################################\n\n # Original dataset values (should be unchanged)\n MatchUpOriginal_expected = return_MatchUpTest_r__()\n\n # Transformed dataset\n values_expected = array([294.0625, 480.3733333, 300.6, 227.3846154, 210.1533333,\n 22.74193548, 22.0625, 21.96875, 22.80645161, 23.5,\n 21.66666667, 21.05882353, 23, 22.40625,\n 38.33333333, 36.63636364, 36.5, 38.42857143,\n 30.1, 32.14893617, 29.37254902, 28.88461538, 28.56603774,\n 33.45238095, 32.81395349, 31.77272727, 32.60465116,\n 40.125, 43.54054054, 38.59090909, 34.08510638,\n 13.72727273, 12, 14.1, 11.79069767, 17.53846154,\n 12.69565217, 31.16666667, 12.26086957, 11.52272727,\n 8.8125, 12, 7.4, 10.13207547])\n unc_expected = [Uncertainty(1, array([1.6, 1.5, 1.5, 1.3, 1.5])),\n Uncertainty(1, array([3.1, 3.2, 3.2, 3.1, 3.0])),\n Uncertainty(1, array([3.3, 3.4, 3.1, 3.2])),\n Uncertainty(1, array([2.1, 2.2, 2.2, 2.1])),\n Uncertainty(1, array([5.0, 4.7, 5.1, 5.2, 5.3])),\n Uncertainty(1, array([4.2, 4.3, 4.4, 4.3])),\n Uncertainty(1, array([4.0, 3.7, 4.4, 4.7])),\n Uncertainty(1, array([2.2, 1.7, 2.0, 4.3, 2.6])),\n Uncertainty(1, array([2.3, 1.2, 2.3, 4.4])),\n Uncertainty(1, array([3.2, 2.7, 3.0, 5.3]))]\n ks_expected = array([4.8, 6.8, 5.2, 5.6, 5.2, 12.10287443, 13.99394856, 12.48108926, 12.85930408])\n unck_expected = [Uncertainty(1, array([0.25, 0.25, 0.25, 0.25, 0.25])),\n Uncertainty(1, array([0.2644, 0.2644, 0.2644, 0.2644]))]\n idx_expected = {\"Nm\": [5, 4],\n \"cNm\": [0, 5, 9],\n \"Im\": [[0, 1], [1, 2]],\n \"sensors\": [-1, 1, 2],\n \"sensor_ms\": [1, 3, 3],\n \"n_sensor\": [0, 1, 1, 2, 1, 1, 2, 1, 1, 2],\n \"n_mu\": [1, 1, 2, 2, 1, 2, 2, 1, 2, 2],\n \"n_cov\": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],\n \"N_var\": [5, 5, 4, 4, 5, 4, 4, 5, 4, 4],\n \"idx\": [0, 5, 10, 14, 18, 23, 27, 31, 36, 40, 44],\n \"Ia\": [1, 1, 1, 2, 2, 2]}\n a_expected = array([1., 1.3, 0.002, 0.5, 1.1, 0.0005])\n w_matrices_expected = []\n u_matrices_expected = []\n\n ################################################################################################################\n # 3. Run Transform2NormInd.run()\n ################################################################################################################\n\n Transform2NormIndOp = Transform2NormInd()\n MatchUpTransform = Transform2NormIndOp.run(MatchUpTest)\n\n values_test = MatchUpTransform.values\n unc_test = MatchUpTransform.unc\n w_matrices_test = MatchUpTransform.w_matrices\n u_matrices_test = MatchUpTransform.u_matrices\n ks_test = MatchUpTransform.ks\n unck_test = MatchUpTransform.unck\n idx_test = MatchUpTransform.idx\n\n ################################################################################################################\n # 4. Compare retrieve values to expect values\n ################################################################################################################\n\n # Test transformed data object attribute by attribute\n\n # a. values\n for i, (value_expected, value_test) in enumerate(zip(values_expected, values_test)):\n self.assertAlmostEqual(value_expected, value_test, places=5, msg=str(i))\n\n # b. unc\n for block_unc_test, block_unc_expected in zip(unc_test, unc_expected):\n self.assertEqual(block_unc_expected.typeID, block_unc_test.typeID)\n self.assertEqual(block_unc_expected.uR.tolist(), block_unc_test.uR.tolist())\n\n # c. w_matrices\n self.assertEqual(w_matrices_test, w_matrices_expected)\n\n # d. 
u_matrices\n self.assertEqual(u_matrices_test, u_matrices_expected)\n\n # e. ks\n for k_expected, k_test in zip(ks_expected, ks_test):\n self.assertAlmostEqual(k_test.tolist(), k_expected.tolist(), places=5)\n\n # f. unck\n for block_unck_test, block_unck_expected in zip(unck_test, unck_expected):\n self.assertEqual(block_unck_expected.typeID, block_unck_test.typeID)\n self.assertEqual(block_unck_expected.uR.tolist(), block_unck_test.uR.tolist())\n\n # h. idx\n self.assertEqual(set(idx_expected.keys()), set(idx_test.keys()))\n for key in idx_expected.keys():\n idx_i_test = idx_test[key]\n idx_i_expected = idx_expected[key]\n if isinstance(idx_i_expected, ndarray):\n self.assertEqual(idx_i_test.tolist(), idx_i_expected.tolist())\n else:\n self.assertEqual(idx_i_test, idx_i_expected)\n\n # Test original data object preserved attribute by attribute\n\n # a. values\n for i, (value_original_expected, value_original_test) in enumerate(zip(MatchUpOriginal_expected.values, MatchUpTest.values)):\n self.assertAlmostEqual(value_original_expected, value_original_test, places=5)\n\n # b. unc\n for block_unc_original_expected, block_unc_original_test in zip(MatchUpOriginal_expected.unc, MatchUpTest.unc):\n self.assertEqual(block_unc_original_expected.typeID, block_unc_original_test.typeID)\n self.assertEqual(block_unc_original_expected.uR.tolist(), block_unc_original_test.uR.tolist())\n\n # c. w_matrices\n self.assertEqual(MatchUpOriginal_expected.w_matrices, MatchUpTest.w_matrices)\n\n # d. u_matrices\n self.assertEqual(MatchUpOriginal_expected.u_matrices, MatchUpTest.u_matrices)\n\n # e. ks\n for k_original_expected, k_original_test in zip(MatchUpOriginal_expected.ks, MatchUpTest.ks):\n self.assertAlmostEqual(k_original_test.tolist(), k_original_expected.tolist(), places=5)\n\n # f. unck\n for block_unck_original_expected, block_unck_original_test in zip(MatchUpOriginal_expected.unck, MatchUpTest.unck):\n self.assertEqual(block_unck_original_expected.typeID, block_unck_original_test.typeID)\n self.assertEqual(block_unck_original_expected.uR.tolist(), block_unck_original_test.uR.tolist())\n\n # h. 
idx\n self.assertEqual(set(MatchUpOriginal_expected.idx), set(MatchUpTest.idx))\n for key in MatchUpOriginal_expected.idx.keys():\n idx_i_original_test = MatchUpTest.idx[key]\n idx_i_original_expected = MatchUpOriginal_expected.idx[key]\n if isinstance(idx_i_original_expected, ndarray):\n self.assertEqual(idx_i_original_test.tolist(), idx_i_original_expected.tolist())\n else:\n self.assertEqual(idx_i_original_test, idx_i_original_expected)", "def test_order_by(self):\n manifestb = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='2.0.0')\n job_type1b = job_test_utils.create_seed_job_type(manifest=manifestb)\n job_test_utils.create_job(job_type=job_type1b, status='RUNNING')\n\n manifestc = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='3.0.0')\n job_type1c = job_test_utils.create_seed_job_type(manifest=manifestc)\n job_test_utils.create_job(job_type=job_type1c, status='RUNNING')\n\n url = '/%s/jobs/?is_superseded=false&order=job_type__name&order=-job_type__version' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)\n\n self.assertEqual(result['results'][0]['job_type']['id'], job_type1c.id)\n self.assertEqual(result['results'][1]['job_type']['id'], job_type1b.id)\n self.assertEqual(result['results'][2]['job_type']['id'], self.job_type1.id)\n self.assertEqual(result['results'][3]['job_type']['id'], self.job_type2.id)", "def pytest_finished_handling_group(session, worker):", "def pytest_can_run_together(item1, item2):", "def run_all_tests(self):\n for index in range(len(self.__test_set_list)):\n self.run_test(index)", "def test_get_scenarios(self):\n pass", "def test(self):\n test_dir = join_path(self.test_suite.current_test_cache_dir, self.test_src_dir)\n self.run_test(\n \"sh\",\n [\"testshortsort.sh\"],\n expected=\"Alignments sorted by coordinate.\",\n purpose=\"test: checking alignments\",\n work_dir=test_dir,\n )", "def run(self, result=None):\n method = getattr(self, self._testMethodName)\n\n if getattr(method, 'multi_graph', False) and not getattr(self, 'graphname', False):\n for graph in self.test_graphs:\n self.graphname = graph\n self.run(result=result)\n self.graphname = None\n else:\n super(BaseRexProTestCase, self).run(result=result)", "def assertManyResults(self, function, args, results):\r\n for arg, result in zip(args, results):\r\n if isinstance(arg, tuple):\r\n self.assertEqual(function(*arg), result)\r\n else:\r\n self.assertEqual(function(arg), result)", "def multiple_qc_test_run(pg_driver):\n # add\n with pg_driver.session_scope() as sxn:\n tr = models.qcreport.TestRun(project_id=\"CGCI-BLGSP\")\n tr.entity_id = str(uuid.uuid4())\n tr.test_type = \"aliquots\"\n tr.status = \"SUCCESS\"\n tr.is_stale = False\n sxn.add(tr)\n\n tr_2 = models.qcreport.TestRun(project_id=\"CGCI-BLGSP\")\n tr_2.entity_id = tr.entity_id\n tr_2.test_type = \"aliquots\"\n tr_2.status = \"ERROR\"\n tr_2.is_stale = False\n sxn.add(tr_2)\n\n yield\n\n # clean up\n cleanup_records(pg_driver, models.qcreport.TestRun, [tr.id, tr_2.id])", "def test_three_identical():\n run_mergesort([3, 3, 3], [3, 3, 3])", "def test_top_crasher_for_variant_analysis(self):\n self.testcases[0].job_type = 'some_type1'\n self.testcases[0].project_name = 'project1'\n self.testcases[0].crash_state = 'abcde'\n self.testcases[0].one_time_crasher_flag = False\n self.testcases[0].crash_type = 'top_crasher'\n 
self.testcases[0].security_flag = True\n\n self.testcases[1].job_type = 'some_type2'\n self.testcases[1].project_name = 'project1'\n self.testcases[1].crash_state = 'vwxyz'\n self.testcases[1].crash_type = 'crash_type2'\n self.testcases[1].one_time_crasher_flag = False\n self.testcases[1].security_flag = True\n\n for t in self.testcases:\n t.put()\n\n self.testcase_variants[1].job_type = 'some_type1'\n self.testcase_variants[1].crash_state = 'abcde'\n self.testcase_variants[1].crash_type = 'crash_type1'\n self.testcase_variants[1].testcase_id = self.testcases[1].key.id()\n self.testcase_variants[1].security_flag = True\n\n for v in self.testcase_variants:\n v.put()\n\n grouper.group_testcases()\n\n for index, t in enumerate(self.testcases):\n self.testcases[index] = data_handler.get_testcase_by_id(t.key.id())\n\n # Check none other testcases are grouped together.\n for testcase in self.testcases:\n self.assertEqual(testcase.group_id, 0)\n self.assertTrue(testcase.is_leader)", "def run_test(self):\n\n # populate *_ps sets\n self.enter_project_file()\n\n # populate *_dir sets\n self.enter_directories()\n\n # The files in the directories makes up the largest possible set of files\n self.result_files = self.result_files_dir\n self.design_files = self.design_files_dir\n self.design_space_files = self.design_space_files_dir\n self.test_bench_files = self.test_bench_files_dir\n\n # populate *_ms sets\n self.enter_meta_results_file()\n\n # populate *_OK sets\n self.check_analysis_status()\n\n df = {'design_files_dir' : list(self.design_files_dir),'design_files_pr' : list(self.design_files_pr),\n 'design_files_ms' : list(self.design_files_ms), 'design_files_OK' : list(self.design_files_OK)}\n\n ds = {'design_space_files_dir' : list(self.design_space_files_dir),\n 'design_space_files_pr' : list(self.design_space_files_pr)}\n\n rs = {'result_files_dir' : list(self.result_files_dir), 'result_files_ms' : list(self.result_files_ms),\n 'result_files_OK' : list(self.result_files_OK)}\n\n tb = {'test_bench_files_dir' : list(self.test_bench_files_dir),\n 'test_bench_files_ms' : list(self.test_bench_files_ms)}\n\n srl = SummaryReportsLinks(self.result_files_dir)\n\n lf = {'files_linked_from_sum_reps' : srl.get_files(),\n 'folders_linked_from_sum_reps' : srl.get_folders()}\n\n # 'test_bench_files_pr' : list(self.test_bench_files_pr),\n \n json_test = {'design_files' : df, 'design_space_files' : ds, 'result_files' : rs,\n 'test_bench_files' : tb, 'stat_files' : self.stat_files,\n 'files_linked_from_sum_reps' : lf}\n\n with open('test_run.json','wb') as f_out:\n json.dump(json_test, f_out, indent=4)", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def test_1():\n results = base_tests()\n assert type(results) is list\n assert type(results[0]) is dict\n assert len(results) == 3", "def test_extraction_with_multiple_query_result(self: Any,\n mock_method: Any) -> None:\n extractor = SQLAlchemyExtractor()\n extractor.results = ['test_result', 'test_result2', 'test_result3']\n extractor.init(Scoped.get_scoped_conf(conf=self.conf,\n scope=extractor.get_scope()))\n result = [extractor.extract() for _ in range(3)]\n\n self.assertEqual(len(result), 3)\n self.assertEqual(result,\n ['test_result', 'test_result2', 'test_result3'])", "def process_results(_load_manifest, _stma_report):\n _junit_results = []\n # the first \"test\" is that the two lists should have the same number of items\n if len(_load_manifest) == len(_stma_report):\n print(\"result count test: pass\")\n else:\n 
print(\"result count test: fail\")\n\n # for the rest, each item in the load manifest equates to a test\n for _load_item in _load_manifest:\n _pass = True\n # get its associated entry from the _stma_report\n _stma_item = find_first_match(_stma_report, _load_item)\n if _stma_item is None:\n _pass = False\n print(\"test \" + str(_load_item) + \": fail due to missing stma result\")\n else:\n # verify details reported by stma\n _pass = equivalent_dicts(_load_item, _stma_item) and\\\n equivalent_dicts(_stma_item, _load_item)\n if not _pass:\n print(\"test \" + str(_load_item) + \": fail due to mismatching result\")\n print(\"test \" + str(_load_item) + \": \" + str(_pass))\n\n return _junit_results", "def test_sort_cards(a_list, result):\n assert sort_cards(a_list) == result", "def test_insertSort(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_1[0]),self.test_1[1])", "def add_result(self, test_ids, status, comment):\n for test_id in test_ids:\n data = {\n 'case_id': test_id,\n 'comment': comment,\n 'status_id': status,\n }\n self.results.append(data)", "def run_tests(tests):\n return [test(t) for t in tests]", "def pytest_collection_modifyitems(config, items):\n # check if studio tests myst be skipped\n run_study = config.getoption(\"--runstudy\")\n # 'all' will match all studies, '' will not match anything\n run_study = {'': '(?!x)x', 'all': '.*'}.get(run_study, run_study)\n # --runstudy given in cli: do not skip study tests and\n test_selected = list()\n test_skipped = list()\n groups = dict()\n incremental = pytest.mark.incremental()\n\n def add():\n \"helper for gathering test info\"\n marker = item.get_marker(mark)\n kwargs = parse_args(marker.args, marker.kwargs)\n group_name = kwargs['name']\n group = groups.setdefault(group_name, dict())\n group.setdefault(mark, list()).append((kwargs, item))\n item.add_marker(incremental)\n\n # place every test in regular, prerequisite and studies\n # group by name\n for item in items:\n for mark in set(item.keywords.keys()).intersection(MARKS):\n add()\n break\n else:\n test_selected.append(item)\n\n def sort(a, b):\n \"Sort two items by order priority\"\n return cmp(a[0]['order'], b[0]['order'])\n\n # use studies precedence to built the global sequence order\n mandatory = 'study' # mandatory mark for global sorting: study\n studies = list()\n for name, info in groups.items():\n studies.extend(info.get(mandatory, []))\n studies.sort(sort)\n\n def append(tests, where):\n \"helper to add the test item from info structure\"\n for test in tests:\n test = test[1]\n if test not in where:\n where.append(test)\n\n # select only the test that are going to be launched\n width = 0\n regexp = re.compile(run_study, re.I | re.DOTALL)\n for study in studies:\n group_name = study[0]['name']\n width = max(width, len(group_name))\n where = test_selected if regexp.search(group_name) else test_skipped\n for mark, seq in groups[group_name].items():\n if mark == mandatory:\n continue\n seq.sort(sort)\n append(seq, where)\n append([study], where)\n\n if config.getoption(\"--show_order\") or config.getoption(\"--debug\"):\n fmt = \"{0:>3d} [{1:>%s}] {2}\" % width\n for i, item in enumerate(test_selected + test_skipped):\n study = get_study_name(item)\n fqn = get_FQN(item)\n line = fmt.format(i, study, fqn)\n if item in test_selected:\n line = term.green('+' + line)\n else:\n line = term.yellow('-' + line)\n print(line)\n\n # we make the --runstudy check at the end to be able to show\n # test order with --show_order or --debig options\n # 
reorder tests by group name and replace items IN-PLACE\n if run_study:\n items[:] = test_selected\n return\n\n skip_test = pytest.mark.skip(reason=\"need --runstudy option to run\")\n for item in items:\n if set(item.keywords.keys()).intersection(MARKS):\n item.add_marker(skip_test)", "def _test(self, **kwargs):\n raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")", "def __call__(self, result=None):\n try:\n self._pre_setup()\n super(TestCase, self).__call__(result)\n finally:\n self._post_teardown()", "def test_2():", "def test_two_ordered():\n run_mergesort([1, 2], [1, 2])", "def test_4():", "def test_serial_runs(make_runner: Callable[..., TargetFunctionRunner]) -> None:\n runner = make_runner(target_delayed, use_instances=True)\n\n run_info = TrialInfo(config=2, instance=\"test2\", seed=0, budget=0.0)\n runner.submit_trial(run_info)\n\n run_info = TrialInfo(config=3, instance=\"test3\", seed=0, budget=0.0)\n runner.submit_trial(run_info)\n\n results = runner.iter_results()\n\n first = next(results, None)\n assert first is not None\n\n second = next(results, None)\n assert second is not None\n\n # To make sure runs launched serially, we just make sure that the end time of a run\n # is later than the other # Results are returned in left to right\n _, first_run_value = first\n _, second_run_value = second\n assert int(first_run_value.endtime) <= int(second_run_value.starttime)", "def test_get_run(self):\n pass", "def test_all():\n test_get_to()\n test_error_type()\n test_exchange()\n print(\"All tests passed.\")", "def run_tests(self):\n raise NotImplementedError", "def pytest_after_group_items(session, config, items):", "def test_videos_default_ordering(mocker, logged_in_apiclient):\n mocker.patch(\"ui.serializers.get_moira_client\")\n mocker.patch(\"ui.utils.get_moira_client\")\n VideoSetPagination.page_size = 5\n client, user = logged_in_apiclient\n collection = CollectionFactory(owner=user)\n VideoFactory.create_batch(10, collection=collection)\n url = reverse(\"models-api:video-list\")\n p1_response = client.get(\"{}?page=1\".format(url))\n assert len(p1_response.data[\"results\"]) == 5\n for i in range(4):\n current_video_date = p1_response.data[\"results\"][i][\"created_at\"]\n next_video_date = p1_response.data[\"results\"][i + 1][\"created_at\"]\n assert current_video_date >= next_video_date\n\n p2_response = client.get(\"{}?page=2\".format(url))\n last_entry_data = p1_response.data[\"results\"][-1][\"created_at\"]\n first_entry_data = p2_response.data[\"results\"][0][\"created_at\"]\n assert last_entry_data >= first_entry_data\n for i in range(4):\n current_video_date = p2_response.data[\"results\"][i][\"created_at\"]\n next_video_date = p2_response.data[\"results\"][i + 1][\"created_at\"]\n assert current_video_date >= next_video_date", "def setUp(self):\n assert COMMANDS.keys() == EXPCT_RESULTS.keys()\n self.tests = []\n self.test_numbers = deque(sorted(COMMANDS.keys()))", "def test_get_scenarios_expanded(self):\n pass", "def test_three_split():\n run_mergesort([3, 0, 3], [0, 3, 3])", "def execute_tests():\n\n if len(sys.argv) > 1:\n # Filter test list based on command line requests\n tests_to_run = []\n for requested in sys.argv[1:]:\n for func, param in registered_tests:\n if param == requested:\n tests_to_run += [(func, param)]\n break\n else:\n print('Unknown test ' + requested)\n sys.exit(1)\n else:\n tests_to_run = registered_tests\n\n failing_tests = []\n for func, param in 
tests_to_run:\n print(param + (' ' * (OUTPUT_ALIGN - len(param))), end='')\n sys.stdout.flush()\n try:\n func(param)\n print(COLOR_GREEN + 'PASS' + COLOR_NONE)\n except KeyboardInterrupt:\n sys.exit(1)\n except TestException as exc:\n print(COLOR_RED + 'FAIL' + COLOR_NONE)\n failing_tests += [(param, exc.args[0])]\n except Exception as exc: # pylint: disable=W0703\n print(COLOR_RED + 'FAIL' + COLOR_NONE)\n failing_tests += [(param, 'Test threw exception:\\n' +\n traceback.format_exc())]\n\n if failing_tests:\n print('Failing tests:')\n for name, output in failing_tests:\n print(name)\n print(output)\n\n print(str(len(failing_tests)) + '/' +\n str(len(tests_to_run)) + ' tests failed')\n if failing_tests != []:\n sys.exit(1)", "def setup(self, options, results):", "def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)", "def _makeResult(self):\n\n result = super(CustomTextTestRunner, self)._makeResult()\n result.test_case_count = self.test_case_count\n return result", "def test_execute_pipeline_one(self):\n task_list = [NumberOne(), NumberTwo(), NumberThree()]\n execute_pipeline(task_list)\n self.assertEqual(\"Task Two\", task_list[2].data_frame)", "def test_results(self, affiliate_items):\n processed_count = 0\n error_count = 0\n\n updater = mock.Mock()\n batch_job = BatchJob(affiliate_items, updater)\n\n for result in batch_job.run():\n processed_count += 1\n error_count += int(result.is_error)\n\n assert updater.call_count == 4\n assert processed_count == 4\n assert error_count == 0", "def main():\n zipper_two_lists_tests()\n zipper_two_tuples_tests()\n zipper_list_and_tuple_tests()", "def run_test_cases(self):\n count = 1\n for test_case in self.test_cases:\n print(\"Running test case #%d\" % count)\n if test_case.name == 'RouteDistance':\n distance = self.get_distance_for_route(test_case.args)\n print('%s distance: %s' % (test_case.args, distance))\n elif test_case.name == 'RouteShortest':\n args = test_case.args.split('|')\n shortest_distance = self.find_shortest_path_between_cities(args[0], args[1])\n print(\"Shortest distance between %s and %s: %d\" % (args[0], args[1], shortest_distance))\n elif test_case.name == 'RouteLessThanHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with hops less than or equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteEqualHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]), equal=True)\n print('Paths between %s and %s with hops equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteLessThanDistance':\n args = test_case.args.split('|')\n paths = self.trips_distance_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with distance less than %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n else:\n raise Exception('Unknown test case: %s' % test_case.name)\n count += 1\n print()", "async def _method_subtests(self, method, test_values, default_header):\n for test_value in test_values:\n api_response = test_value[\"api response\"]\n expected_lines = test_value[\"expected_lines\"]\n\n with self.subTest(method=method, api_response=api_response, expected_lines=expected_lines):\n self.bot.api_client.get.return_value = api_response\n\n expected_output = 
\"\\n\".join(expected_lines)\n actual_output = await method(self.member)\n\n self.assertEqual((default_header, expected_output), actual_output)", "def assert_console_output_ordered(self, *output, **kwargs):\n self.assertEqual(list(output), self.execute_console_task(**kwargs))", "def run_tests():\n parser = ArgumentParser()\n parser.add_argument('name',nargs='?',default=None,help=\"Suite or test name\")\n parser.add_argument('-b','--bin-dir',help=\"Directory where Firebird binaries tools are\")\n parser.add_argument('-d','--db-dir',help=\"Directory to use for test databases\")\n parser.add_argument('--archive',action='store_true',help=\"Save last run results to archive\")\n parser.add_argument('--rerun',action='store_true',help=\"Run only tests that don't PASSed in last run\")\n parser.add_argument('--untested',action='store_true',help=\"Run only tests that were UNTESTED in last run\")\n parser.add_argument('-v','--verbose',action='store_true',help=\"Be more verbose\")\n parser.add_argument('--verbosity',type=int,choices=[0,1,2],default=1,help=\"Set verbosity; --verbosity=2 is the same as -v\")\n parser.add_argument('-q','--quiet',action='store_true',help=\"Be less verbose\")\n parser.add_argument('-x','--xunit',action='store_true',help=\"Provides test results also in the standard XUnit XML format\")\n parser.add_argument('-e','--expect',type=str,metavar=\"FILENAME\",help=\"Test results file to be used as expeted outcomes\")\n if rpyc_available:\n parser.add_argument('--remote',action='store_true',help=\"Connect to remote fbtest server\")\n\n parser.add_argument('-u','--update',action='store_true',help=\"Update last run results with re-run results\")\n parser.add_argument('-w','--password',help=\"SYSDBA password\")\n parser.add_argument('-o','--host',help=\"Remote Firebird or fbtest host machine identification\")\n parser.add_argument('-p','--person',help=\"QA person name\")\n parser.add_argument('-a','--arch',help=\"Firebird architecture: SS, CS, SC, EM\")\n parser.add_argument('-s','--sequence',type=int,help=\"Run sequence number for this target\")\n parser.add_argument('-k','--skip',help=\"Suite or test name or name of file with suite/test names to skip\")\n parser.add_argument('-c','--client',help=\"Use specified Firebird client library\")\n parser.set_defaults(rerun=False,untested=False,update=False,server=False,register=False,\n remote=False,host='localhost',password='masterkey',\n sequence=1,arch='SS',person=UNKNOWN)\n\n script_runner.run_tests(parser.parse_args())", "def test_run_and_restore(self):\n # Run for 5 algo-calls\n testargs = [\"python\", \"scripts/smac\", \"--scenario_file\",\n self.scenario_one, \"--verbose\", \"DEBUG\"]\n with mock.patch.object(sys, 'argv', testargs):\n self.smaccli.main_cli()\n # Increase limit and run for 10 (so 5 more) by using restore_state\n testargs = [\"python\", \"scripts/smac\", \"--restore_state\",\n self.output_one, \"--scenario_file\",\n self.scenario_two, \"--verbose\", \"DEBUG\"]\n with mock.patch.object(sys, 'argv', testargs):\n self.smaccli.main_cli()", "def test_customize_test_loads(self):\n self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.tester)\n self.create_forktest(\"own-fork-commit\", TestPlatform.linux, regression_tests=[2])\n self.create_completed_regression_t_entries(3, [2])\n response = self.app.test_client().get('/test/3')\n self.assertEqual(response.status_code, 200)\n self.assert_template_used('test/by_id.html')\n regression_tests = RegressionTest.query.all()\n 
self.assertIn(regression_tests[1].command, str(response.data))\n self.assertNotIn(regression_tests[0].command, str(response.data))", "def prepare(self):\n for scenario_result, scenario_pass, case_pass in self.iterate():\n for step_result in scenario_result.step_results:\n step_pass = step_result.success\n url, method = step_result.fetch.url, step_result.fetch.method\n params = step_result.fetch.kwargs.get(\"params\")\n method_report = self.get_method_report(url, method)\n if method_report:\n method_report.add(\n case_pass, scenario_pass, step_pass, params\n )", "def test_run(self, init_event, mocker):\n mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEvent,\n \"enabled\",\n new_callable=mocker.PropertyMock(return_value=True),\n )\n mock_stats = mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEvent,\n \"stats\",\n new_callable=mocker.PropertyMock,\n )\n mock_item_map = mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEvent,\n \"item_map\",\n new_callable=mocker.PropertyMock,\n )\n\n mock_stats.return_value = mocker.MagicMock(\n spec=houdini_toolbox.events.stats.HoudiniEventStats\n )\n\n mock_map = {}\n mock_item_map.return_value = mock_map\n\n event = init_event()\n\n mock_item1 = mocker.MagicMock(spec=houdini_toolbox.events.item.HoudiniEventItem)\n mock_item1.run.side_effect = lambda sa: sa[\"order\"].append(mock_item1)\n\n mock_item2 = mocker.MagicMock(spec=houdini_toolbox.events.item.HoudiniEventItem)\n mock_item2.run.side_effect = lambda sa: sa[\"order\"].append(mock_item2)\n\n mock_item3 = mocker.MagicMock(spec=houdini_toolbox.events.item.HoudiniEventItem)\n mock_item3.run.side_effect = lambda sa: sa[\"order\"].append(mock_item3)\n\n # Assign objects to event map with priorities.\n mock_map[0] = [mock_item2]\n mock_map[15] = [mock_item3]\n mock_map[5] = [mock_item1]\n\n scriptargs = {\"key\": \"value\", \"order\": []}\n\n expected_scriptargs = {\n \"key\": \"value\",\n # We expect events to be run in decreasing priority order\n \"order\": [mock_item3, mock_item1, mock_item2],\n }\n\n # Run the test event.\n event.run(scriptargs)\n\n # Make sure each thing was ran.\n mock_item1.run.assert_called_once()\n mock_item2.run.assert_called_once()\n mock_item3.run.assert_called_once()\n\n assert scriptargs == expected_scriptargs\n\n # Ensure the context manager was called.\n mock_stats.return_value.__enter__.assert_called_once()\n mock_stats.return_value.__exit__.assert_called_once()", "def test_integration3(self):\n self._test_integration(3)", "def run_tests(remit, sourcelist):\n for source in sourcelist:\n # - move into source's directory\n os.chdir(source)\n # - build worklist of commands\n commands = list()\n commands += test_matrix(remit, source)\n commands += extra_tests(remit, source)\n commands = remove_blacklist(remit, source, commands)\n # - run the commands\n for i, command in enumerate(commands):\n print('[test %s: %s of %d] %s'\n % (source,\n str(i+1).rjust(len(str(len(commands)))),\n len(commands),\n ' '.join(command)))\n subprocess.call(command)\n # - move out of source's directory\n os.chdir('..')", "def test_execute_xia_automated_workflow(self, mock_run):\n self.assert_(execute_xia_automated_workflow.run())\n\n self.assert_(execute_xia_automated_workflow.run())\n self.assertEqual(mock_run.call_count, 2)\n\n self.assert_(execute_xia_automated_workflow.run())\n self.assertEqual(mock_run.call_count, 3)", "def main():\n num_of_tests = int(input())\n\n # iterate over test cases\n for test_case in range(1, num_of_tests + 1):\n result = handle_case()\n 
printable_result = handle_result(result)\n print(\"Case #{}: {}\".format(test_case, printable_result))", "def test_concurrent_add_and_delete_pending_test_case_result(self):\n result = xml_reporter._TextAndXMLTestResult(None, self.stream, None, 0,\n None)\n def add_and_delete_pending_test_case_result(test_name):\n test = MockTest(test_name)\n result.addSuccess(test)\n result.delete_pending_test_case_result(test)\n\n for i in range(50):\n add_and_delete_pending_test_case_result('add_and_delete_test%s' % i)\n self.assertEqual(result.pending_test_case_results, {})", "def test_results_are_sorted(self, data_flow_api_client):\n response = data_flow_api_client.get(self.view_url)\n assert response.status_code == status.HTTP_200_OK\n\n results = response.json()['results']\n\n assert results == sorted(results, key=lambda t: t['id'])", "def test_importer_returns_tests():\n flowtask = FlowTaskFactory()\n flowtask.build_flow.build.org = OrgFactory()\n with temporary_dir() as output_dir:\n copyfile(\n TEST_ROBOT_OUTPUT_FILES / \"robot_with_failures.xml\",\n Path(output_dir) / \"output.xml\",\n )\n actual = robot_importer.import_robot_test_results(flowtask, output_dir)\n expected = [\n {\n \"name\": \"Passing test\",\n \"group\": \"Robot Fail\",\n \"status\": \"Pass\",\n \"start_time\": \"2020-06-23T18:49:20.955000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.956000+00:00\",\n \"exception\": \"Life is good, yo.\",\n \"doc\": \"\",\n \"tags\": [\"tag one\", \"tag two\"],\n },\n {\n \"name\": \"Failing test 1\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:20.957000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.960000+00:00\",\n \"exception\": \"Danger, Will Robinson!\",\n \"doc\": \"A test that fails with a keyword directly in the test\",\n \"tags\": [],\n },\n {\n \"name\": \"Failing test 2\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:20.960000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.963000+00:00\",\n \"doc\": \"A test that fails due to a failure in a lower level keyword.\",\n \"exception\": \"I'm sorry, Dave. 
I'm afraid I can't do that.\",\n \"tags\": [],\n },\n {\n \"name\": \"Failing test 3\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:21.017000+00:00\",\n \"end_time\": \"2020-06-23T18:49:21.024000+00:00\",\n \"exception\": (\n \"Several failures occurred:\\n\\n\"\n \" 1) First failure\\n\\n\"\n \" 2) Second failure\"\n ),\n \"doc\": \"A test that has multiple keyword failures\",\n \"tags\": [],\n },\n ]\n assert actual == expected", "def run_all(self):\n failures, errors = [], []\n\n # Run each test case registered with us and agglomerate the results.\n for case_ in self.cases:\n case_.run()\n update_results(failures, errors, case_)\n\n # Display our results.\n print_errors(errors)\n print_failures(failures)\n print_overview(errors, failures)\n\n # Exit with 0 if all tests passed, >0 otherwise.\n sys.exit(len(failures) + len(errors))", "def run(self, result=None):\n\n with self.env_wrap():\n super(RelengToolTestCase, self).run(result)", "def test_present_next_result_displays_result(self):\n # to test this we don't actually need to write to the database,\n # we just need a list of ordered_dicts in menu.records\n test_records = [\n OrderedDict([\n ('name', 'Test Employee 1'),\n ('date', datetime.date(2018, 5, 1)),\n ('task_name', 'Test Task 1'),\n ('duration', 1),\n ('notes', 'This is a note for the first test task')\n ]),\n OrderedDict([\n ('name', 'Test Employee 2'),\n ('date', datetime.date(2018, 5, 2)),\n ('task_name', 'Test Task 2'),\n ('duration', 2),\n ('notes', 'This is a note for the second test task')\n ]),\n OrderedDict([\n ('name', 'Test Employee 3'),\n ('date', datetime.date(2018, 5, 3)),\n ('task_name', 'Test Task 3'),\n ('duration', 3),\n ('notes', 'This is a note for the third test task')\n ]),\n ]\n self.menu.records = test_records\n self.menu.current_record = 1\n line0 = test_records[1]['name'] + \"\\n\"\n f_date = test_records[1]['date'].strftime(\"%Y-%m-%d\")\n f_task_name = test_records[1]['task_name']\n line1 = \"{}: {}\".format(f_date, f_task_name)\n line2 = \"\\n\" + (\"-\" * len(line1)) + \"\\n\"\n f_time_taken = str(test_records[1]['duration'])\n line3 = \"{} minutes\\n\".format(f_time_taken)\n line4 = \"{}\\n\".format(test_records[1]['notes'])\n long_form = (line0 +\n line1 +\n line2 +\n line3 +\n line4\n )\n expected_output = (long_form +\n \"\\n\" +\n \"Available actions:\\n\" +\n \"p) Previous\\n\" +\n \"n) Next\\n\" +\n \"b) Back to list view\\n\" +\n \"e) Edit\\n\" +\n \"d) Delete\\n\" +\n \"m) go back to Main menu\\n\" +\n \"q) quit\\n\")\n\n # Create a StringIO object to be a capture object\n captured_output = io.StringIO()\n # point stdout at the capture object\n sys.stdout = captured_output\n # Do anything that's going to have a print statement\n # (these will be accumulated in the captured_output object)\n example_input = 'q'\n with patch('builtins.input', side_effect=example_input):\n self.menu.present_next_result()\n\n # Revert stdout (captured_output still holds the captured items)\n sys.stdout = sys.__stdout__\n # Do any other test code (e.g., asserts)\n self.assertEqual(expected_output, captured_output.getvalue())", "def test_suite():\n test(sum_upto_first_even([1,3,2]),4)\n test(sum_upto_first_even([1,3,3]),7)\n test(sum_upto_first_even([2,3,3]),0)", "def inner_test():\n pass" ]
[ "0.8005941", "0.7942821", "0.6458603", "0.6281796", "0.61934733", "0.6125403", "0.61176115", "0.6106937", "0.6105472", "0.6088446", "0.6071222", "0.6060519", "0.60560644", "0.6031968", "0.60201895", "0.60119915", "0.59998757", "0.5996369", "0.5989808", "0.5985094", "0.5966215", "0.5933381", "0.59317553", "0.5927337", "0.5876174", "0.5826675", "0.58211637", "0.5817424", "0.5800586", "0.5793413", "0.5788336", "0.57867175", "0.57690907", "0.57669306", "0.57498085", "0.57426715", "0.5740818", "0.5736736", "0.57328224", "0.5729942", "0.57222164", "0.5721344", "0.5700642", "0.568049", "0.5675054", "0.5665107", "0.5664765", "0.5660319", "0.5651027", "0.56391656", "0.56374127", "0.5627211", "0.56262094", "0.56220126", "0.56149703", "0.5614783", "0.56136435", "0.56112504", "0.56065595", "0.56007355", "0.55894214", "0.55843437", "0.55831766", "0.5580953", "0.5580024", "0.5574998", "0.5571679", "0.55708104", "0.5567552", "0.5561905", "0.55565125", "0.5551125", "0.55479354", "0.55388665", "0.553857", "0.55382884", "0.5534139", "0.55331004", "0.55261403", "0.5524246", "0.55238914", "0.5518823", "0.5511905", "0.55116796", "0.5509305", "0.5508882", "0.5503978", "0.5495419", "0.5493257", "0.5492059", "0.5491778", "0.5491654", "0.54894227", "0.54862857", "0.54818046", "0.5481268", "0.54774904", "0.5476396", "0.54739165", "0.54662734" ]
0.84070575
0
A helper to create a TestResult
def _create_test_result(self, **kwargs): test_info = test_runner_base.TestResult(**RESULT_TEST_TEMPLATE._asdict()) return test_info._replace(**kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _makeResult(self):\n\n result = super(CustomTextTestRunner, self)._makeResult()\n result.test_case_count = self.test_case_count\n return result", "def getTestResults():", "def create_success(test, time):\n return _TestInfo(test, time)", "def create_result(main_test):\n result = Result(outputs=[DBHandler.NAME], main_test=main_test)\n result.startTestRun()\n return result", "def test_get_results(self):\n pass", "def _create_result(\n self, raw_data: T, processed_data: List[TestGroupReport]\n ) -> ImportedResult:\n raise NotImplementedError", "def test(self):\n return self._test(result_count=1, failure_amount=1)", "def test(self):\n return self._test(result_count=1, failure_amount=1)", "def _CreateTestResult(self) -> paranoid_pb2.TestResultsEntry:\n\n if self.severity is None:\n raise KeyError(\"Please specify self.severity for %s.\" % self.check_name)\n return paranoid_pb2.TestResultsEntry(\n severity=self.severity, test_name=self.check_name, result=False)", "def make_final_result(test_result, steps, begin_time):\n import time\n import pytest\n if pytest.current_exception:\n steps[-1].set_status(Status.FAILED, pytest.current_exception)\n test_failed = False\n for step in steps:\n test_result.add_step(step)\n if step.get_status() == Status.FAILED:\n print('%s: ' % str(step.get_id()) + constant.Color.FAIL +\n 'failed\\nMessage: ' + step.get_message() +\n constant.Color.ENDC)\n test_failed = True\n\n if not test_failed:\n test_result.set_test_passed()\n else:\n test_result.set_test_failed()\n\n test_result.set_duration(time.time() - begin_time)\n test_result.write_result_to_file()", "def test_student_do_homework_positive():\n assert isinstance(result_1, HomeworkResult)", "def _create_result(\n self, raw_data: Element, processed_data: List[TestGroupReport]\n ) -> GTestImportedResult:\n return GTestImportedResult(\n name=self.name,\n results=processed_data,\n description=self.description,\n )", "def _get_result(self, test_result_file):\n\t\tresult = {}\n\n\t\txml_obj = xml.dom.minidom.parse(test_result_file)\n\t\tif not xml_obj.getElementsByTagName(\"completed\"):\n\t\t\tsys.stderr.write(\n\t\t\t\t\"File has empty result...removing %s\\n\" % test_result_file)\n\t\t\tos.remove(test_result_file)\n\t\t\treturn\n\n\t\tinca_resource = amass.xml_tag_value(xml_obj, \"resourceHostname\")\n\t\tresult[\"SOURCE_RESOURCE\"] = self._normalize_resource(inca_resource)\n\t\ttry:\n\t\t\tinca_resource = amass.xml_tag_value(xml_obj, \"targetHostname\")\n\t\t\tresult[\"TARGET_RESOURCE\"] = self._normalize_resource(inca_resource)\n\t\texcept:\n\t\t\tresult[\"TARGET_RESOURCE\"] = result[\"SOURCE_RESOURCE\"]\n\t\tresult[\"TEST_NAME\"] = amass.xml_tag_value(xml_obj, \"nickname\")\n\t\tresult[\"COLLECTED_DATE\"] = amass.string2datetime(amass.xml_tag_value(xml_obj, \"gmt\"))\n\t\tresult[\"RESULT\"] = None\n\t\terror = None\n\n\t\ttry:\n\t\t\terror = amass.xml_tag_value(xml_obj, \"errorMessage\")\n\t\texcept:\n\t\t\tpass\n\t\ttry:\n\t\t\tcr = amass.xml_tag_value(xml_obj, \"comparisonResult\")\n\t\t\tif cr == 'Success':\n\t\t\t\tresult[\"RESULT\"] = True\n\t\t\telse:\n\t\t\t\terror = cr if error is None else \"%s: %s\" % (cr, error)\n\t\t\t\tresult[\"RESULT\"] = False\n\t\texcept:\n\t\t\tcompleted = amass.xml_tag_value(xml_obj, \"completed\")\n\t\t\tif completed == 'true':\n\t\t\t\tresult[\"RESULT\"] = True\n\t\t\telse:\n\t\t\t\tresult[\"RESULT\"] = False\n\n\t\tif error:\n\t\t\terror.replace(\"'\", \"\")\n\t\tresult[\"ERROR_MSG\"] = error\n\n\t\treturn result", "def run( self, test ):\n\n result = 
self._makeResult()\n test( result )\n result.printErrors()\n self.stream.writeln( result.separator2 )\n run = result.testsRun\n self.stream.writeln()\n\n if not result.wasSuccessful():\n self.stream.write( \"FAILED (\" )\n failed, errored = map( len, ( result.failures, result.errors ) )\n if failed:\n self.stream.write( \"failures=%d\" % failed )\n if errored:\n if failed: self.stream.write( \", \" )\n self.stream.write( \"errors=%d\" % errored )\n self.stream.writeln( \")\" )\n else:\n self.stream.writeln( \"OK\" )\n \n return result", "def test_return_class_content_by_accepts(self,request,**kwargs):\n \n class TestReturn:\n \"\"\"Test return class\"\"\"\n def __init__(self):\n self.__t1 = 'Test'\n \n t1 = TestReturn()\n t1.test1 = 'Test1'\n \n t2 = TestReturn()\n t2.test2=\"Test2\"\n return (t1,t2)", "def test_get_results_verbose(self):\n\t\tpass", "def test_formatResult(self):\r\n x = self.FWP({'x': 3})\r\n self.assertEqual(x.formatResult(3), '3')", "def test_make_results_simple(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\ttest.do_run()\n\t\ttest.make_results_simple()\n\t\tobj_ut = test.results_simple\n\t\tself.assertEqual(obj_ut, {'.text id': '100', '.text score': -1, \n\t\t\t'total wordcount': 7, 'total hits': 2, 'pos hits': 0,\n\t\t\t'neg hits': 2})", "def make_final_result(test_result, steps, begin_time, logger):\n import time\n from .constant import Colors\n from libraries.result import Status\n for step in steps:\n test_result.add_step(step)\n if step.get_status() == Status.FAILED:\n print('%s: ' % str(step.get_id()) + Colors.FAIL + 'failed\\nMessage: ' + step.get_message() + Colors.ENDC)\n test_result.set_test_failed()\n\n test_result.set_duration(time.time() - begin_time)\n test_result.write_result_to_file()\n logger.save_log(test_result.get_test_status())", "def generateFinalResult(self):\n if self.__testResult == 'FAIL':\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n elif self.__testResult == 'PASS':\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n elif self.__testResult == 'NONE':\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY) \n self.__testResult = 'PASS'\n #else:\n total_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"total_count\", TestScriptSymbolTable.test_result_tab))\n pass_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"pass_count\", TestScriptSymbolTable.test_result_tab))\n fail_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"fail_count\", TestScriptSymbolTable.test_result_tab))\n conditional_chk_flag = int(TestScriptSymbolTable.get_value_from_sym_tab(\"conditional_chk_flag\", TestScriptSymbolTable.test_result_tab))\n num_of_pass_required = int(TestScriptSymbolTable.get_value_from_sym_tab(\"num_of_pass_required\", TestScriptSymbolTable.test_result_tab))\n \n if total_count >= 1:\n if conditional_chk_flag == 1:\n if num_of_pass_required <= pass_count:\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'PASS'\n else:\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'FAIL'\n else:\n if fail_count > 0:\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'FAIL'\n else:\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'PASS'\n else:\n if GlobalConfigFiles.curr_tc_name != \"\":\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n logging.debug(\"\\n TEST COMPLETED without FINAL 
RESULT...\")\n\n self.__testResult = 'FAIL'\n\n self.tmsPacket.TestResult = self.__testResult\n if GlobalConfigFiles.curr_tc_name != \"\":\n logging.info(\"\\n FINAL TEST RESULT ---> %15s\", self.__testResult)\n logging.info(' END: TEST CASE [%s]', GlobalConfigFiles.curr_tc_name)\n\n Util.set_color(Util.FOREGROUND_WHITE)\n GlobalConfigFiles.test_result = self.__testResult\n\n self.tmsPacket.TimeStamp = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime())\n if GlobalConfigFiles.curr_tc_name != \"\":\n self.tmsPacket.writeTMSJson()\n\n return", "def createMakingTest(tx, query, personId, testId, date, hour, result):\n tx.run(query, personId=personId, testId=testId, date=date, hour=hour, result=result)", "def _get_result(driver):\n try: \n \tresult = oval.OVALResult(driver.request.title, driver.execute_tests())\n \tcurrent_app.logger.info(time.ctime() + \"\\tOVAL Result created for %s\" % result.title)\n\n \treturn result\n except oval.OVALDriverError as e:\n\tflash(str(e))", "def generate_mock_result(project='TEST', repository=None, status='SUCCESS', success=True, run_id=1,\n timestamp=None):\n if not timestamp: # If no time provided, use right now.\n timestamp = str(int(time.time() * 1000))\n if not repository:\n repository = '{}-repo'.format(project.lower())\n result = dict(project=project, repository=repository, status=status, success=success, run_id=run_id,\n timestamp=timestamp, id='{}{}'.format(repository, run_id))\n return result", "def _actionTestSetDetailsFromResult(self):\n from testmanager.core.testresults import TestResultData;\n from testmanager.core.testset import TestSetData;\n idTestResult = self.getIntParam(TestSetData.ksParam_idTestResult);\n oTestResultData = TestResultData().initFromDbWithId(self._oDb, idTestResult);\n return self._actionTestSetDetailsCommon(oTestResultData.idTestSet);", "def test_arrange_test_result_one_module(self):\n pass_1 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_2 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_3 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n fail_1 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n fail_2 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n ignore_1 = self._create_test_result(status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([pass_1, pass_2, pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([fail_1, fail_2, ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_summary, info_dict[aei._TOTAL_SUMMARY_KEY])", "def test_rule_create_command_success_hr(rule_create_success, rule_create_success_hr):\n resp = prepare_rule_create_output(rule_create_success)\n assert resp == rule_create_success_hr", "def test_importer_returns_tests():\n flowtask = FlowTaskFactory()\n flowtask.build_flow.build.org = OrgFactory()\n with temporary_dir() as output_dir:\n copyfile(\n TEST_ROBOT_OUTPUT_FILES / \"robot_with_failures.xml\",\n Path(output_dir) / \"output.xml\",\n )\n actual = robot_importer.import_robot_test_results(flowtask, output_dir)\n expected = [\n {\n \"name\": \"Passing test\",\n \"group\": \"Robot Fail\",\n \"status\": \"Pass\",\n \"start_time\": \"2020-06-23T18:49:20.955000+00:00\",\n \"end_time\": 
\"2020-06-23T18:49:20.956000+00:00\",\n \"exception\": \"Life is good, yo.\",\n \"doc\": \"\",\n \"tags\": [\"tag one\", \"tag two\"],\n },\n {\n \"name\": \"Failing test 1\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:20.957000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.960000+00:00\",\n \"exception\": \"Danger, Will Robinson!\",\n \"doc\": \"A test that fails with a keyword directly in the test\",\n \"tags\": [],\n },\n {\n \"name\": \"Failing test 2\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:20.960000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.963000+00:00\",\n \"doc\": \"A test that fails due to a failure in a lower level keyword.\",\n \"exception\": \"I'm sorry, Dave. I'm afraid I can't do that.\",\n \"tags\": [],\n },\n {\n \"name\": \"Failing test 3\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:21.017000+00:00\",\n \"end_time\": \"2020-06-23T18:49:21.024000+00:00\",\n \"exception\": (\n \"Several failures occurred:\\n\\n\"\n \" 1) First failure\\n\\n\"\n \" 2) Second failure\"\n ),\n \"doc\": \"A test that has multiple keyword failures\",\n \"tags\": [],\n },\n ]\n assert actual == expected", "def get_result(self) -> Any:\n ...", "def parse_verifier_result(self):\n stat = self.get_verifier_result(self.verification_id)\n try:\n num_executed = stat['num_tests'] - stat['num_skipped']\n try:\n self.result = 100 * stat['num_success'] / num_executed\n except ZeroDivisionError:\n self.result = 0\n if stat['num_tests'] > 0:\n LOGGER.info(\"All tests have been skipped\")\n else:\n LOGGER.error(\"No test has been executed\")\n return\n\n with open(os.path.join(self.res_dir, \"rally.log\"),\n 'r', encoding='utf-8') as logfile:\n output = logfile.read()\n\n success_testcases = []\n for match in re.findall(r'.*\\{\\d{1,2}\\} (.*?) \\.{3} success ',\n output):\n success_testcases.append(match)\n failed_testcases = []\n for match in re.findall(r'.*\\{\\d{1,2}\\} (.*?) \\.{3} fail',\n output):\n failed_testcases.append(match)\n skipped_testcases = []\n for match in re.findall(r'.*\\{\\d{1,2}\\} (.*?) 
\\.{3} skip(?::| )',\n output):\n skipped_testcases.append(match)\n\n self.details = {\"tests_number\": stat['num_tests'],\n \"success_number\": stat['num_success'],\n \"skipped_number\": stat['num_skipped'],\n \"failures_number\": stat['num_failures'],\n \"success\": success_testcases,\n \"skipped\": skipped_testcases,\n \"failures\": failed_testcases}\n except Exception: # pylint: disable=broad-except\n self.result = 0\n\n LOGGER.info(\"Tempest %s success_rate is %s%%\",\n self.case_name, self.result)", "def test_results(self):\n result = self.test_client._results\n\n assert isinstance(result, list)\n assert len(result) == 1", "def test_virtual_service_create_command_for_human_readable(\n virtual_service_create_success, virtual_service_success_hr\n):\n resp = prepare_virtual_service_output(virtual_service_create_success)\n assert resp == virtual_service_success_hr", "def _get_target_and_expected(self):\n\n out = [\n 'This is one hell of a test',\n 'Vraiment!',\n ]\n err = None\n exitcode = 0\n\n def my_target():\n return out\n\n return my_target, out, err, exitcode", "def GetTestWrapper(self):\n return ''", "def _writeMockResultFile(result):\n with open(result.filename, 'w') as f:\n f.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n if len(result.suites) > 1 or result.noSuitesRoot is False:\n f.write('<testsuites>\\n')\n for suite in result.suites:\n f.write('<testsuite tests=\"'+str(suite.tests)+'\" failures=\"'+str(suite.fail)+'\" time=\"'+str(suite.time)+'\" errors=\"'+str(suite.errors)+'\" name=\"'+suite.name+'\">\\n')\n for case in suite.cases:\n f.write('<testcase name=\"'+case.name+'\" status=\"run\" time=\"'+str(case.time)+'\" classname=\"'+case.classname+'\">\\n')\n for error in case.errorList:\n f.write('<failure message=\"'+error.value+'\" type=\"'+error.value+'\"/>\\n')\n f.write('</testcase>\\n')\n f.write('</testsuite>\\n')\n if len(result.suites) > 1 or result.noSuitesRoot is False:\n f.write('</testsuites>\\n')", "def test_ruleset_create_command_success_hr(\n ruleset_create_success, ruleset_create_success_hr\n):\n mock_name = \"Trial ruleset from test cases - 1\"\n resp = prepare_ruleset_create_output(ruleset_create_success, mock_name)\n assert resp == ruleset_create_success_hr", "def checkResult(self, (results, authority, additional), qtype):\n result = results[0]\n self.assertEquals(str(result.name), self.hostname)\n self.assertEquals(result.type, qtype)", "def test_workload_get_command_human_readable(\n workload_get_success, workload_get_success_hr\n):\n hr_output = prepare_workload_get_output(workload_get_success)\n assert hr_output == workload_get_success_hr", "def test_add_labware_result() -> None:\n result = AddLabwareDefinitionResult(\n loadName=\"loadname\",\n namespace=\"ns\",\n version=1,\n )\n\n assert result.loadName == \"loadname\"\n assert result.namespace == \"ns\"\n assert result.version == 1", "def test_result(self):\n result = compute()\n self.assertEqual(result, '4782')\n print(\"eulpy25Test passed\")", "def compare_test_output(concrete_test_case):\n _, (resources, test_case), _ = concrete_test_case\n resources = dict(resources)\n test_case = dict(test_case)\n args = tuple(test_case.get('args', ()))\n kwargs = dict(test_case.get('kwargs', ()))\n\n return file_list.get_output_and_expected(\n test_case['name'],\n test_case['purpose'],\n args,\n kwargs,\n resources['out_dir'],\n resources['expected_dir'])", "def add_test_result(self, request):\n request.worker.add_test_result(request.message.test_id,\n request.message.code,\n 
request.message.info)\n\n return SuccessReply()", "def _create_output_test(test_src, tested_function, options=None):\n def do_test_expected(self):\n \"\"\"\n Execute a test by calling a tested_function on test_src data.\n \"\"\"\n self.maxDiff = None\n\n # We currently don't throw any exceptions in Writer, so this\n # this is always false\n if 'error' in test_src:\n self.assertRaises(test_src['error'], yamlish.dumps,\n test_src['in'], options)\n else:\n logging.debug(\"out:\\n%s\", textwrap.dedent(test_src['out']))\n want = yaml.load(textwrap.dedent(test_src['out']))\n logging.debug(\"want:\\n%s\", want)\n with tempfile.NamedTemporaryFile() as test_file:\n tested_function(test_src['in'], test_file)\n test_file.seek(0)\n got_str = test_file.read()\n logging.debug(\"got_str = %s\", got_str)\n got = yaml.load(got_str)\n self.assertEqual(got, want, \"Result matches\")\n\n return do_test_expected", "def collect_result_metadata(metadata):\n try:\n yield\n metadata['test_status'] = 'succeeded'\n except Exception as exn:\n metadata['test_status'] = 'failed'\n metadata['exception'] = {\n 'class': exn.__class__.__name__,\n 'message': str(exn)\n }\n raise", "def test_convert():", "def get_results(self):\n error_dict = {'error_code_test': self.error_code_test,\n 'error_text_test': self.error_text_test}\n\n return self.testresults, error_dict, self.checkstats", "def test_zet_resultaat(self, state, zrc_client, ztc_client):\n zrc_client.auth.set_claims(\n scopes=[\n 'zds.scopes.zaken.lezen',\n 'zds.scopes.zaken.bijwerken'\n ],\n zaaktypes=[state.zaaktype['url']]\n )\n resultaattype = ztc_client.retrieve('resultaattype', uuid=RESULTAATTYPE_UUID)\n\n assert 'url' in resultaattype\n\n resultaat = zrc_client.create('resultaat', {\n 'zaak': state.zaak['url'],\n 'resultaatType': resultaattype['url'],\n 'toelichting': 'Een toelichting op wat het resultaat',\n })\n\n assert 'url' in resultaat", "def test_zet_resultaat(self, state, zrc_client, ztc_client):\n zrc_client.auth.set_claims(\n scopes=[\n 'zds.scopes.zaken.lezen',\n 'zds.scopes.zaken.bijwerken'\n ],\n zaaktypes=[state.zaaktype['url']]\n )\n resultaattype = ztc_client.retrieve('resultaattype', uuid=RESULTAATTYPE_UUID)\n\n assert 'url' in resultaattype\n\n resultaat = zrc_client.create('resultaat', {\n 'zaak': state.zaak['url'],\n 'resultaatType': resultaattype['url'],\n 'toelichting': 'Een toelichting op wat het resultaat',\n })\n\n assert 'url' in resultaat", "def create_failure(test, time, failure):\n info = _TestInfo(test, time)\n info._failure = failure\n return info", "def test_T2():", "def test_T2():", "def run(self, result, debug=False):\n rv = unittest.TestSuite.run(self, result, debug)\n sys.stdout.flush()\n return rv", "def _AddResult(self):\n if not self._results:\n result = analyzer_result.AnalyzerResult()\n result.attribute_name = 'test_result'\n result.attribute_value = 'is_vegetable'\n self._results.append(result)", "def test_to_json(self):\n actual_result = ResultBuilder(IPERF_PARSER_EXPECTED_RESULT,\n OK_MESSAGE,\n OK_RETURN_CODE).build_json()\n self.assertMultiLineEqual(actual_result,\n EXPECTED_OUTPUT_BUILDER_RESULT)", "def test_T1():", "def test_T1():", "def test_successful(self):\n\n url = '/%s/job-types/' % self.api\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 6)\n for entry in result['results']:\n expected = None\n if entry['name'] == self.job_type1.name:\n expected = 
self.job_type1\n elif entry['name'] == self.job_type2.name:\n expected = self.job_type2\n elif entry['name'] == self.job_type3.name:\n expected = self.job_type3\n elif entry['name'] == self.job_type6.name:\n if entry['version'] == self.job_type4.version:\n expected = self.job_type4\n elif entry['version'] == self.job_type5.version:\n expected = self.job_type5\n elif entry['version'] == self.job_type6.version:\n expected = self.job_type6\n else:\n self.assertTrue(False, 'unexpected job type!')\n else:\n self.fail('Found unexpected result: %s' % entry['id'])\n\n self.assertEqual(entry['name'], expected.name)\n self.assertEqual(entry['title'], expected.get_title())\n self.assertEqual(entry['description'], expected.get_description())\n self.assertEqual(entry['icon_code'], expected.icon_code)\n self.assertEqual(entry['is_published'], expected.is_published)\n self.assertEqual(entry['is_active'], expected.is_active)\n self.assertEqual(entry['is_paused'], expected.is_paused)\n self.assertEqual(entry['is_system'], expected.is_system)\n self.assertEqual(entry['max_scheduled'], expected.max_scheduled)\n self.assertEqual(entry['revision_num'], expected.revision_num)\n self.assertEqual(entry['docker_image'], expected.docker_image)", "def create(self):\n response = self.request()\n result_obj = result.Result()\n result_obj.response = response\n\n self.set_result(result_obj, self.execution_type)\n if self.execution_type == \"async\":\n # We set the execution status to 0 as there is no way of knowing the\n # status of async call. Only while reading the response data we will set\n # the actual status code in the result object\n result_obj.set_status_code(int(0))\n return result_obj", "def test_traffic_analysis_human_readable(\n traffic_analysis_success, traffic_analysis_success_hr\n):\n resp = prepare_traffic_analysis_output(traffic_analysis_success)\n assert resp == traffic_analysis_success_hr", "def test_get_result_directory(self):\n pass", "def __repr__(self) -> str:\n return f\"<TestResult {self.test_id},{self.regression_test_id}: {self.exit_code} \" \\\n f\"(expected {self.expected_rc} in {self.runtime} ms>\"", "def create_dummy_build_result():\n\n date_time = datetime.utcnow()\n return BuildResults.create(\n job_name=\"my_jobname\",\n job_link=\"my_joburl\",\n build_date_time=str(date_time),\n build_id=\"1234\",\n platform=\"Linux-x86_64\",\n product=\"MyProduct\",\n )", "def _test_result(effect, margin, se, dof, coverage, effect_size_constituents):\n t_stat = (effect + margin) / se\n if margin:\n p_value = _one_sided_p_value(t_stat, dof)\n else:\n p_value = _two_sided_p_value(t_stat, dof)\n t_alpha = scipy.stats.t.isf((1 - coverage) / 2.0, dof)\n lower = effect - t_alpha * se\n upper = effect + t_alpha * se\n return TestResult(\n effect=effect,\n ci=(lower, upper),\n statistic=t_stat,\n dof=dof,\n pvalue=p_value,\n effect_size_constituents=effect_size_constituents)", "def assertResults(self, expected, result, deduped=False):\n self.assertEqual([u'shards'], result.keys())\n self.assertEqual(1, len(result[u'shards']))\n self.assertTrue(result[u'shards'][0], result)\n result = result[u'shards'][0].copy()\n self.assertFalse(result.get(u'abandoned_ts'))\n bot_version = result.pop(u'bot_version')\n self.assertTrue(bot_version)\n if result.get(u'costs_usd') is not None:\n expected.pop(u'costs_usd', None)\n self.assertLess(0, result.pop(u'costs_usd'))\n if result.get(u'cost_saved_usd') is not None:\n expected.pop(u'cost_saved_usd', None)\n self.assertLess(0, result.pop(u'cost_saved_usd'))\n 
self.assertTrue(result.pop(u'created_ts'))\n self.assertTrue(result.pop(u'completed_ts'))\n self.assertLess(0, result.pop(u'duration'))\n task_id = result.pop(u'task_id')\n run_id = result.pop(u'run_id')\n self.assertTrue(task_id)\n self.assertTrue(task_id.endswith('0'), task_id)\n if not deduped:\n self.assertEqual(task_id[:-1] + '1', run_id)\n self.assertTrue(result.pop(u'bot_idle_since_ts'))\n self.assertTrue(result.pop(u'modified_ts'))\n self.assertTrue(result.pop(u'started_ts'))\n\n if getattr(expected.get(u'output'), 'match', None):\n expected_output = expected.pop(u'output')\n output = result.pop('output')\n self.assertTrue(\n expected_output.match(output),\n '%s does not match %s' % (output, expected_output.pattern))\n\n # Bot python version may be different.\n result[u'bot_dimensions'] = sorted(\n [d for d in result[u'bot_dimensions'] if not d['key'] == 'python'])\n\n self.assertEqual(expected, result)\n return bot_version", "def query_test_results(self, res, step_name='query_test_results'):\n return self._proto_step_result(res, step_name)", "def export_result(job_managers: 'list[job_manager.JobManager]', total_time: int):\n failed_jobs = 0 # type: int\n info(\"Exporting test output file\")\n root = Element('testsuite')\n\n for job_item in job_managers:\n child = SubElement(root, \"testcase\")\n # Add a message to the error\n child.attrib[\"name\"] = str(job_item.raw_job_id)\n if job_item.status.job_state != utils.JobState.COMPLETE:\n failed_jobs += 1\n sub_child = SubElement(child, \"failure\")\n sub_child.attrib[\"message\"] = str(\"Job [{}] failed due the ERROR: [{}]\".format(\n job_item.job_id, job_item.status.job_state))\n\n sub_child.text = str(job_item.status.message)\n\n # Add the time it took for this test to compete.\n if job_item.duration is not None:\n info(\"Job {} took {} to complete\".format(job_item.job_id, job_item.duration))\n # If the job failed we set the duration to 0\n job_duration = \"0:00:00\"\n try:\n converted_time = time.strptime(str(job_item.duration).split('.')[0], '%H:%M:%S')\n total_seconds = datetime.timedelta(hours=converted_time.tm_hour, minutes=converted_time.tm_min,\n seconds=converted_time.tm_sec).total_seconds()\n except ValueError as e:\n child.attrib[\"time\"] = job_duration\n \n child.attrib[\"time\"] = str(total_seconds)\n # job did not run, so the test did not run\n else:\n child.attrib[\"time\"] = \"0:00:00\"\n\n root.attrib[\"failures\"] = str(failed_jobs)\n root.attrib[\"tests\"] = str(len(job_managers))\n\n root.attrib[\"time\"] = str(total_time.total_seconds())\n tree = ElementTree(root)\n tree.write(\"Tests/output.xml\")", "def get_test(arn=None):\n pass", "def test_create_factory_method():\n date_time = datetime.utcnow()\n build_results = BuildResults.create(\n job_name=\"my_jobname\",\n job_link=\"my_joburl\",\n build_date_time=str(date_time),\n build_id=\"1234\",\n platform=\"Linux-x86_64\",\n product=\"MyProduct\",\n )\n\n assert build_results.br_job_name == \"my_jobname\"\n assert build_results.br_job_url_key == \"my_joburl\"\n assert build_results.br_build_date_time == str(date_time)\n assert build_results.br_build_id_key == \"1234\"\n assert build_results.br_platform == \"Linux-x86_64\"\n assert build_results.br_product == \"MyProduct\"\n assert build_results.br_version_key == ebr_connector.__version__\n\n assert build_results.to_dict() == {\n \"br_build_date_time\": str(date_time),\n \"br_build_id_key\": \"1234\",\n \"br_job_name\": \"my_jobname\",\n \"br_job_url_key\": \"my_joburl\",\n \"br_platform\": \"Linux-x86_64\",\n 
\"br_product\": \"MyProduct\",\n \"br_version_key\": ebr_connector.__version__,\n }", "def __compare_results(self, expected, result, test_case, time):\n # type: (str, str, str, datetime.datetime) -> None\n if expected == result:\n self.pass_test(test_case, time)\n else:\n self.fail_test(test_case, time,\n error=\"(expected:received) (\" + expected + \":\" +\n result + \")\")", "def __init__(self, verbosity=1):\n unittest.TestResult.__init__(self)\n self.stdout0 = None\n self.stderr0 = None\n self.success_count = 0\n self.failure_count = 0\n self.error_count = 0\n self.total_count = 0\n self.verbosity = verbosity\n self.result = []", "def __init__(self, test_case_name):\n Result.__init_output_folder()\n self.__test_result = {} # Store information of a test case\n self.__run = [] # Store information of steps in test case\n self.__test_result[Result.__TEST_CASE] = test_case_name\n self.__test_result[Result.__RESULT] = Status.FAILED\n self.__test_result[Result.__START_TIME] = \\\n str(time.strftime(\"%Y-%m-%d_%H-%M-%S\"))\n self.__json_file_path = \\\n \"{}{}_{}.json\".format(Result.__json_dir,\n self.__test_result[Result.__TEST_CASE],\n self.__test_result[Result.__START_TIME])\n Result.result_of_all_tests.append(self.__json_file_path)", "def extract_results_test(self):\n assert len(self.results.keys()) != 0\n TESTS = [\n {\n \"input\": {\"molecules\": [\"DDSPDLPK\"], \"score_threshold\": 0.95},\n \"output\": {\n \"formula\": \"C(37)H(59)N(9)O(16)\",\n \"file_name\": \"BSA1.mzML\",\n \"scaling_factor\": 100,\n \"spec_id\": 1337,\n },\n }\n ]\n for test_dict in TESTS:\n for key, n, entry in self.results.extract_results(**test_dict[\"input\"]):\n print(key, entry)\n assert key.formula == test_dict[\"output\"][\"formula\"]\n assert key.file_name == test_dict[\"output\"][\"file_name\"]\n assert entry.scaling_factor == test_dict[\"output\"][\"scaling_factor\"]\n assert entry.spec_id == test_dict[\"output\"][\"spec_id\"]\n # print(self.results)\n # print(self.results.lookup)\n assert n == 0", "def test_api_response_data(self):", "def run(self, test):\n class_ = test.__class__\n classname = class_.__module__ + '.' 
+ class_.__name__\n if self._stream is None:\n filename = 'TEST-%s.xml' % classname\n stream = open(os.path.join(self._path, filename), 'w')\n stream.write('<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n')\n else:\n stream = self._stream\n\n result = _XMLTestResult(classname)\n start_time = time.time()\n\n # TODO: Python 2.5: Use the with statement\n old_stdout = sys.stdout\n old_stderr = sys.stderr\n sys.stdout = StringIO()\n sys.stderr = StringIO()\n\n try:\n test(result)\n try:\n out_s = sys.stdout.getvalue()\n except AttributeError:\n out_s = ''\n try:\n err_s = sys.stderr.getvalue()\n except AttributeError:\n err_s = ''\n finally:\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n\n time_taken = time.time() - start_time\n result.print_report(stream, time_taken, out_s, err_s)\n stream.flush()\n\n result.print_report_text(sys.stdout, time_taken, out_s, err_s)\n\n return result", "def process_ResultCheck(self):\n try:\n cmd = self.ExecutionTask.get_param().split(',')\n logging.debug(\"%s-%s-%s-%s-%s\" % ( TestScriptSymbolTable.get_value_from_sym_tab(cmd[0], TestScriptSymbolTable.test_script_sym_tab),cmd[0], cmd[1], cmd[2], cmd[3]))\n\n checkval = cmd[0].split('!') \n \n cval = TestScriptSymbolTable.get_value_from_sym_tab(checkval[1], TestScriptSymbolTable.capi_cmd_ret_sym_tab)\n\n if int(cval) >= int(cmd[1]):\n result = cmd[2]\n else:\n result = cmd[3]\n\n logging.info(\"\\nRESULT CHECK---> %15s\", result) \n self.setTestResult(result)\n \n #if result == 'FAIL':\n if 'FAIL' in result:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n except OSError:\n logging.info(\"\\nException - ResultCheck\")", "def validate_result(self):\n raise NotImplementedError", "def create_test_summary(args, TEST_RESULTS):\n logging.error(\"Creating test summary report...\")\n\n try:\n test_summary = \"Performance Metrics of {APP} Application Tested from this PR\\n\".format(APP=args.bundle_id)\n test_summary += \"---------------------------------------------------------------\\n\"\n\n for element in TEST_RESULTS:\n if element != LAUNCHES:\n test_summary += \"> {KEY}: {VALUE}\".format(KEY=element, VALUE=TEST_RESULTS[element])\n if element == INSTALL_LAUNCH_DURATION:\n if int(TEST_RESULTS[INSTALL_LAUNCH_DURATION]) > args.duration_limit:\n test_summary += \"ms :x:\\n\"\n else:\n test_summary += \"ms :white_check_mark:\\n\"\n\n if element == INSTALL_MEMORY_USAGE:\n if int(TEST_RESULTS[INSTALL_MEMORY_USAGE]) > args.memory_limit:\n test_summary += \"MB :x:\\n\"\n else:\n test_summary += \"MB :white_check_mark:\\n\"\n\n if element == APP_SIZE:\n if int(TEST_RESULTS[APP_SIZE]) > args.size_limit:\n test_summary += \"MB :x:\\n\"\n else:\n test_summary += \"MB :white_check_mark:\\n\"\n test_summary += \"---------------------------------------------------------------\\n\"\n\n for element in TEST_RESULTS[LAUNCHES]:\n test_summary += \"> DEVICE: {DEVICE} | LAUNCH TYPE: {LAUNCH_TYPE} | \".format(DEVICE=element[DEVICE], LAUNCH_TYPE=element[LAUNCH_TYPE])\n test_summary += \"DURATION: {DURATION}ms \".format(DURATION=element[LAUNCH_DURATION])\n if int(element[LAUNCH_DURATION]) > args.duration_limit:\n test_summary += \" :x: | \"\n else:\n test_summary += \" :white_check_mark: | \"\n\n test_summary += \"MEMORY USAGE: {MEMORY_USAGE}MB \".format(MEMORY_USAGE=element[MEMORY_USAGE])\n if int(element[MEMORY_USAGE]) > args.memory_limit:\n test_summary += \" :x:\\n\"\n else:\n test_summary += \" :white_check_mark:\\n\"\n test_summary += \"----------------------------------------------------\\n\"\n\n except Exception as e:\n 
logging.error(\"Creating test summary failed with error '{ERROR}'\".format(ERROR=e))\n return None\n\n logging.info(test_summary)\n return test_summary", "def get_rest_result_template(self, result, command, success_code):\n result = {\"result\" : result,\n \"command\" : command,\n \"success_code\": success_code} # 0 - OK, >0 - Error number\n return result", "def test_parse_results_error():\n error_result = [{\"error\": \"test\"}]\n assert [{\"title\": \"Error\",\n \"subtitle\": \"test\",\n \"valid\": False}] == parse_results(error_result)", "def test_make_results_verbose1(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\ttest.do_run()\n\t\ttest.make_results_verbose()\n\t\tobj_ut = test.results_verbose\n\t\tself.assertEqual(obj_ut, [['100', 'not good', 2, -1, 0],\n\t\t\t['100', 'not very good', 4, -1, 0]])", "def test_initialization_of_homework_result_solution():\n assert result_1.solution == \"I have done this hw\"", "def xml(self, time_taken, out, err):\n test_suite = ET.Element('testsuite')\n test_suite.set('errors', str(len(self.errors)))\n test_suite.set('failures', str(len(self.failures)))\n test_suite.set('name', self._test_name)\n test_suite.set('tests', str(self.testsRun))\n test_suite.set('time', '%.3f' % time_taken)\n for info in self._tests:\n test_suite.append(info.xml())\n system_out = ET.SubElement(test_suite, 'system-out')\n system_out.text = cdata(self.filter_nonprintable_text(out))\n system_err = ET.SubElement(test_suite, 'system-err')\n system_err.text = cdata(self.filter_nonprintable_text(err))\n return ET.ElementTree(test_suite)", "def test_successful(self, mock_create, mock_msg_mgr):\n\n json_data = {\n \"input\" : {\n 'version': '6',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n result = json.loads(response.content)\n\n #Response should be new v6 job detail response\n self.assertEqual(result['execution'], None)\n self.assertEqual(result['max_tries'], 3)\n self.assertTrue('/%s/jobs/' % self.api in response['location'])\n mock_create.assert_called_once()", "def getResult(self):\n return self.ok", "def test_T3():", "def test_T3():", "def test_successful(self):\n\n url = '/%s/job-type-names/' % self.api\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)\n for entry in result['results']:\n expected = None\n if entry['name'] == self.job_type1.name:\n expected = self.job_type1\n elif entry['name'] == self.job_type2.name:\n expected = self.job_type2\n elif entry['name'] == self.job_type3.name:\n expected = self.job_type3\n elif entry['name'] == self.job_type6.name:\n expected = self.job_type6\n else:\n self.fail('Found unexpected result: %s' % entry['id'])\n self.assertEqual(entry['name'], expected.name)\n self.assertEqual(entry['title'], expected.get_title())\n self.assertEqual(entry['description'], expected.get_description())\n if entry['name'] == 'job-type-for-view-test':\n self.assertItemsEqual(entry['versions'], [\"1.0.0\", \"1.2.0\", \"1.10.0\"])\n else:\n self.assertItemsEqual(entry['versions'], [\"1.0.0\"])\n self.assertEqual(entry['latest_version'], expected.version)", "def test_T01():", "def test_create_run(self):\n pass", "def 
test_basic_execution(self):", "def test_get_task_output(self):\n pass", "def write_test_result(filesystem, port, results_directory, test_name, driver_output,\n expected_driver_output, failures):\n root_output_dir = results_directory\n writer = TestResultWriter(filesystem, port, root_output_dir, test_name)\n\n if driver_output.error:\n writer.write_stderr(driver_output.error)\n\n for failure in failures:\n # FIXME: Instead of this long 'if' block, each failure class might\n # have a responsibility for writing a test result.\n if isinstance(failure, (test_failures.FailureMissingResult,\n test_failures.FailureTextMismatch,\n test_failures.FailureTestHarnessAssertion)):\n writer.write_text_files(driver_output.text, expected_driver_output.text)\n writer.create_text_diff_and_write_result(driver_output.text, expected_driver_output.text)\n elif isinstance(failure, test_failures.FailureMissingImage):\n writer.write_image_files(driver_output.image, expected_image=None)\n elif isinstance(failure, test_failures.FailureMissingImageHash):\n writer.write_image_files(driver_output.image, expected_driver_output.image)\n elif isinstance(failure, test_failures.FailureImageHashMismatch):\n writer.write_image_files(driver_output.image, expected_driver_output.image)\n writer.write_image_diff_files(driver_output.image_diff)\n elif isinstance(failure, (test_failures.FailureAudioMismatch,\n test_failures.FailureMissingAudio)):\n writer.write_audio_files(driver_output.audio, expected_driver_output.audio)\n elif isinstance(failure, test_failures.FailureCrash):\n crashed_driver_output = expected_driver_output if failure.is_reftest else driver_output\n writer.write_crash_log(crashed_driver_output.crash_log)\n elif isinstance(failure, test_failures.FailureLeak):\n writer.write_leak_log(driver_output.leak_log)\n elif isinstance(failure, test_failures.FailureReftestMismatch):\n writer.write_image_files(driver_output.image, expected_driver_output.image)\n # FIXME: This work should be done earlier in the pipeline (e.g., when we compare images for non-ref tests).\n # FIXME: We should always have 2 images here.\n if driver_output.image and expected_driver_output.image:\n diff_image, _ = port.diff_image(expected_driver_output.image, driver_output.image)\n if diff_image:\n writer.write_image_diff_files(diff_image)\n else:\n _log.warning('ref test mismatch did not produce an image diff.')\n writer.write_image_files(driver_output.image, expected_image=None)\n if filesystem.exists(failure.reference_filename):\n writer.write_reftest(failure.reference_filename)\n else:\n _log.warning(\"reference %s was not found\", failure.reference_filename)\n elif isinstance(failure, test_failures.FailureReftestMismatchDidNotOccur):\n writer.write_image_files(driver_output.image, expected_image=None)\n if filesystem.exists(failure.reference_filename):\n writer.write_reftest(failure.reference_filename)\n else:\n _log.warning(\"reference %s was not found\", failure.reference_filename)\n else:\n assert isinstance(failure, (test_failures.FailureTimeout, test_failures.FailureReftestNoImagesGenerated))\n\n if expected_driver_output is not None:\n writer.create_repaint_overlay_result(driver_output.text, expected_driver_output.text)", "def test_call_result_as_dict(self):\r\n exp_assignments = rdp_test1_expected_dict\r\n min_confidence = self.default_app.Params['Confidence']\r\n\r\n # Since there is some variation in the assignments, run\r\n # 10 trials and make sure we get the expected result at least once\r\n num_trials = 10\r\n unverified_seq_ids = 
set(exp_assignments.keys())\r\n for i in range(num_trials):\r\n obs_assignments = self.default_app(self.tmp_seq_filepath)\r\n for seq_id in list(unverified_seq_ids):\r\n obs_assignment, obs_confidence = obs_assignments[seq_id]\r\n exp_assignment, exp_confidence = exp_assignments[seq_id]\r\n self.assertTrue(obs_confidence >= min_confidence)\r\n if obs_assignment == exp_assignment:\r\n unverified_seq_ids.remove(seq_id)\r\n if not unverified_seq_ids:\r\n break\r\n\r\n messages = []\r\n for seq_id in unverified_seq_ids:\r\n messages.append(\r\n \"Unable to verify %s in %s trials\" % (seq_id, num_trials))\r\n messages.append(\" Expected: %s\" % exp_assignments[seq_id][0])\r\n messages.append(\" Observed: %s\" % obs_assignments[seq_id][0])\r\n messages.append(\" Confidence: %s\" % obs_assignments[seq_id][1])\r\n\r\n # make sure all taxonomic results were correct at least once\r\n self.assertFalse(unverified_seq_ids, msg='\\n'.join(messages))", "def get_test_status(self) -> str:\n return self.__test_result[Result.__RESULT]", "def test_tests():\n submission = SubmissionBuilder(\"t\", \"b\", [\"anything\"]).build()\n assert submission.get(\"results\") == [\"anything\"], submission", "def test_get_result_string_1(self):\n attr_list = [\"type\", \"phage_id\", \"eval_mode\"]\n string = import_genome.get_result_string(self.tkt, attr_list)\n exp = \"type: replace, phage_id: Trixie, eval_mode: final\"\n self.assertEqual(string, exp)", "def build_test(base_url, node, input_test = None):\n\n mytest = input_test\n if not mytest:\n mytest = Test()\n\n node = lowercase_keys(flatten_dictionaries(node)) #Clean up for easy parsing\n\n #Copy/convert input elements into appropriate form for a test object\n for configelement, configvalue in node.items():\n #Configure test using configuration elements\n if configelement == u'url':\n assert isinstance(configvalue,str) or isinstance(configvalue,unicode) or isinstance(configvalue,int)\n mytest.url = base_url + unicode(configvalue,'UTF-8').encode('ascii','ignore')\n elif configelement == u'method': #Http method, converted to uppercase string\n var = unicode(configvalue,'UTF-8').upper()\n assert var in HTTP_METHODS\n mytest.method = var\n elif configelement == u'group': #Test group\n assert isinstance(configvalue,str) or isinstance(configvalue,unicode) or isinstance(configvalue,int)\n mytest.group = unicode(configvalue,'UTF-8')\n elif configelement == u'name': #Test name\n assert isinstance(configvalue,str) or isinstance(configvalue,unicode) or isinstance(configvalue,int)\n mytest.name = unicode(configvalue,'UTF-8')\n elif configelement == u'validators':\n #TODO implement more validators: regex, file/schema match, etc\n if isinstance(configvalue, list):\n for var in configvalue:\n myquery = var.get(u'query')\n myoperator = var.get(u'operator')\n myexpected = var.get(u'expected')\n myexportas = var.get(u'export_as')\n\n # NOTE structure is checked by use of validator, do not verify attributes here\n # create validator and add to list of validators\n if mytest.validators is None:\n mytest.validators = list()\n validator = Validator()\n validator.query = myquery\n validator.expected = myexpected\n validator.operator = myoperator if myoperator is not None else validator.operator\n validator.export_as = myexportas if myexportas is not None else validator.export_as\n mytest.validators.append(validator)\n else:\n raise Exception('Misconfigured validator, requires type property')\n elif configelement == u'body': #Read request body, either as inline input or from file\n #Body is 
either {'file':'myFilePath'} or inline string with file contents\n if isinstance(configvalue, dict) and u'file' in lowercase_keys(configvalue):\n var = lowercase_keys(configvalue)\n assert isinstance(var[u'file'],str) or isinstance(var[u'file'],unicode)\n mytest.body = os.path.expandvars(read_file(var[u'file'])) #TODO change me to pass in a file handle, rather than reading all bodies into RAM\n elif isinstance(configvalue, str):\n mytest.body = configvalue\n else:\n # TODO add ability to handle input of directories or file lists with wildcards to test against multiple bodies\n raise Exception('Illegal input to HTTP request body: must be string or map of file -> path')\n\n elif configelement == 'headers': #HTTP headers to use, flattened to a single string-string dictionary\n mytest.headers = flatten_dictionaries(configvalue)\n elif configelement == 'expected_status': #List of accepted HTTP response codes, as integers\n expected = list()\n #If item is a single item, convert to integer and make a list of 1\n #Otherwise, assume item is a list and convert to a list of integers\n if isinstance(configvalue,list):\n for item in configvalue:\n expected.append(int(item))\n else:\n expected.append(int(configvalue))\n mytest.expected_status = expected\n elif configelement == 'stop_on_failure':\n mytest.stop_on_failure = safe_to_bool(configvalue)\n\n #Next, we adjust defaults to be reasonable, if the user does not specify them\n\n #For non-GET requests, accept additional response codes indicating success\n # (but only if not expected statuses are not explicitly specified)\n # this is per HTTP spec: http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.5\n if 'expected_status' not in node.keys():\n if mytest.method == 'POST':\n mytest.expected_status = [200,201,204]\n elif mytest.method == 'PUT':\n mytest.expected_status = [200,201,204]\n elif mytest.method == 'DELETE':\n mytest.expected_status = [200,202,204]\n\n return mytest", "def make_results(self):\n statistic_value, p_value = self.stats\n accept_hypothesis = self.accept_hypothesis(statistic_value)\n\n return FrequentistTestResults(\n control=self.comparison.d2,\n variation=self.comparison.d1,\n delta=self.comparison.delta,\n delta_relative=self.comparison.delta_relative,\n effect_size=self.comparison.effect_size,\n alpha=self.comparison.alpha,\n power=self.comparison.power,\n confidence_interval=self.ci,\n test_statistic=self.test_statistic,\n statistic_value=statistic_value,\n p_value=p_value,\n df=None,\n hypothesis=self.hypothesis_text,\n accept_hypothesis=accept_hypothesis,\n inference_procedure=self,\n warnings=self.comparison.warnings\n )", "def test_10(self, test):\r\n return test.MANUAL()", "def test_02(self, test):\r\n\r\n return test.MANUAL()", "def check_result(context, expected):\n assert context.result == expected, \"Wrong result: {r} != {e}\".format(\n r=context.result, e=expected\n )" ]
[ "0.73380005", "0.707614", "0.6962716", "0.68611425", "0.67847985", "0.671325", "0.67094016", "0.67094016", "0.66369814", "0.64441335", "0.64401895", "0.64096403", "0.62658703", "0.6241152", "0.6234344", "0.6229282", "0.6228823", "0.62096107", "0.61986804", "0.6181871", "0.61169815", "0.6110971", "0.60708463", "0.6070631", "0.6025079", "0.59909356", "0.5985757", "0.59823847", "0.5978657", "0.5977935", "0.5965046", "0.5957559", "0.594874", "0.5947789", "0.594561", "0.5930179", "0.59265643", "0.5924296", "0.5916222", "0.5882733", "0.5880989", "0.5867039", "0.5865666", "0.5846069", "0.5840145", "0.5838365", "0.5838365", "0.58306533", "0.5825166", "0.5825166", "0.5814757", "0.58104634", "0.58102113", "0.5808088", "0.5808088", "0.5806014", "0.5805133", "0.580265", "0.577528", "0.57703537", "0.5770273", "0.57565355", "0.57520217", "0.5747731", "0.5746348", "0.5745856", "0.57445544", "0.57108384", "0.57024544", "0.5698075", "0.569541", "0.569235", "0.5676841", "0.5667697", "0.5667124", "0.5662877", "0.56613404", "0.56595886", "0.5657348", "0.5657201", "0.5654564", "0.56466985", "0.5644115", "0.5640987", "0.5640987", "0.5639974", "0.56389594", "0.5637906", "0.56357694", "0.5633386", "0.5622456", "0.5622375", "0.5618854", "0.5603364", "0.5597992", "0.5594922", "0.55887127", "0.5587113", "0.55802274", "0.5577804" ]
0.8006094
0
Initializes the Minesweeper instance with a width, height, and number of mines. Sets up a default game table, generates random mine locations, and builds a second table holding the solution.
def __init__(self, height, width, mines):
    self.x = int(width)
    self.y = int(height)
    self.table_state = [
        ['-' for i in xrange(0, self.x)] for j in xrange(0, self.y)]
    self.mine_locations = self.generate_mines(int(mines))
    self.final_table = self.generate_answer()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, rows, cols, mines):\n self.rows = rows\n self.cols = cols\n self.mines = mines\n self.opened = 0\n self.game_won = False\n self.game_lost = False\n self.board = self.__init__minefield__()\n self.tiles = self.__init__tiles__()", "def __init__(self, density=0.25, width=10, height=10, side= 20, autoreveal=False):\n\t\ttkinter.Frame.__init__(self, None)\n\t\tself.master.title(\"Minesweeper\")\n\t\tself.grid()\n\t\tself.width, self.height, self.side = width, height, side\n\t\tself.density = density\n\t\tself.auto = autoreveal\n\t\tself.game = None\n\t\tself.bind_all(\"q\", lambda a: self.quit())\n\t\t# create button\n\t\ttkinter.Button(self, text=\"NEW\", relief=\"groove\", command=self.new_game).grid(row=0, column=0)\n\t\ttkinter.Button(self, text=\"HINT\", relief=\"groove\", command=self.hint).grid(row=0, column=2)\n\t\tself.label = tkinter.Label(self, text=\"\")\n\t\tself.label.grid(row=0, column=1)\n\t\t# create mine field\n\t\tself.canvas = tkinter.Canvas(self, width=width*side, height=height*side, bg=\"white\")\n\t\tself.canvas.grid(row=1, column=0, columnspan=3)\n\t\tself.canvas.bind(\"<Button>\", self.reveal_cell)\n\t\tself.new_game()", "def __init__(self, mine_count=BOARD_DIM[\"MINE_COUNT\"], width=BOARD_DIM[\"BOARD_WIDTH\"],\n height=BOARD_DIM[\"BOARD_HEIGHT\"]):\n if height is None:\n height = width\n if mine_count > height * width:\n raise TooManyMineException\n self.height = height\n self.width = width\n self.mine_count = mine_count\n self.chessboard = [[Point(x, y) for x in range(width)] for y in range(height)]\n self.mines = [-1 for z in range(mine_count)]\n self.initialise()", "async def minesweeper(self, ctx, width: int = 5, height: int = 5, mines: int = 5, debug=None):\n # Check for invalid input\n if width <= 0 or height <= 0 or mines <= 0:\n await ctx.send('Please enter width, height, and number of mines greater than zero.', delete_after=10)\n return\n\n # Max board that doesn't break is 13x13. TODO restrict this? 
max len is 2048 chars\n minefield = [[0 for h in range(height)] for w in range(width)]\n\n for i in range(mines):\n while True: # TODO consider replacing with random.choice() for the lists\n # Find an empty space and place a mine there\n x, y = random.randint(0, width - 1), random.randint(0, height - 1)\n if minefield[x][y] != 'M':\n minefield[x][y] = 'M'\n break\n # Mark the other ones\n for x in range(width):\n for y in range(height):\n if minefield[x][y] != 'M':\n total_mines = 0\n # Check the 8 tiles around it\n for dy in range(-1, 2):\n for dx in range(-1, 2):\n if 0 <= x + dx < width and 0 <= y + dy < height:\n total_mines += int(minefield[x + dx][y + dy] == 'M')\n minefield[x][y] = total_mines\n\n # Convert the numbers to emoji\n for x in range(width):\n for y in range(height):\n if minefield[x][y] == 'M':\n minefield[x][y] = '\\U0001F4A3'\n else:\n # Convert the number to its proper keycap emoji variant\n minefield[x][y] = f'\\\\U0000003{str(minefield[x][y])}\\\\U0000FE0F\\\\U000020E3'.encode().decode('unicode-escape')\n\n # Create string to send\n text_field = 'Minesweeper:\\n'\n spoiler = lambda s: '||' + s + '||'\n\n for x in range(width):\n for y in range(height):\n text_field += spoiler(minefield[x][y]) if not debug == '1' else minefield[x][y]\n text_field += ' '\n text_field += '\\n'\n\n await ctx.send(text_field)", "def __init__minefield__(self):\n # Creates random locations of mines according to the size of the game board.\n mines = random.sample(range(0, self.rows * self.cols), self.mines)\n \n # Uses a helper method to initialize tile categories: mine or zero.\n return [[Tiles(i, j, self.create_tile(mines, i, j)) for j in range(self.cols)] for i in range(self.rows)]", "def __init__(self, height=50, width=50, mines=100):\n # Set initial width, height, and number of mines\n self.height = height\n self.width = width\n self.mines = set()\n\n # Initialize an empty field with no mines\n self.board = []\n for i in range(self.height):\n row = []\n for j in range(self.width):\n row.append(False)\n self.board.append(row)\n\n # Add mines randomly to the board\n while len(self.mines) != mines:\n i = random.randrange(height)\n j = random.randrange(width)\n if not self.board[i][j]:\n self.mines.add((i, j))\n self.board[i][j] = True\n\n # Maintain a set of mines that is found by the player\n self.mines_found = set() # initially this set is empty", "def test_generate_board_max_mines(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 10\n height = 12\n\n # act\n game.generate_board(width, height, width * height - 1)\n\n # assert\n self.assertEqual(width, len(game.board[0]), 'Board width incorrect.')\n self.assertEqual(height, len(game.board), 'Board height incorrect.')\n\n minesFound = (sum(1 for row in range(height)\n for col in range(width) if game.board[row][col].is_mine))\n\n self.assertEqual(width * height - 1, minesFound,\n 'Wrong number of mines found.')", "def test_generate_board_no_mines(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 10\n height = 12\n\n # act\n game.generate_board(width, height, 0)\n\n # assert\n self.assertEqual(width, len(game.board[0]), 'Board width incorrect.')\n self.assertEqual(height, len(game.board), 'Board height incorrect.')\n\n for row in range(height):\n self.assertFalse(any([s.is_mine for s in game.board[row]]),\n (\"Mine found when board was generated \" +\n \"with mineCount = 0.\"))", "def __init__(self, master, width, height, number):\n Frame.__init__(self, master)\n # create list of minesweeper cell coords\n 
cellCoords = []\n for h in range(height):\n for w in range(width):\n cellCoords.append((h, w))\n random.shuffle(cellCoords)\n # find bombs\n self.bombCoords = cellCoords[:number]\n self.bombs = []\n for bomb in self.bombCoords:\n self.bombs.append(MsCell(bomb, True, self))\n # make the rest non-bomb cells\n self.cellCoords = cellCoords[number:]\n self.nonExposed = self.cellCoords[:]\n self.nonBombcells = []\n for cell in self.cellCoords:\n adjacentBombs = 0\n for h in range(max(0, cell[0] - 1), min(cell[0] + 2, height)):\n for w in range(max(0, cell[1] - 1), min(cell[1] + 2, width)):\n if (h, w) in self.bombCoords:\n adjacentBombs += 1\n self.nonBombcells.append(MsCell(cell, adjacentBombs, self))\n # set cell\n self.number = number\n self.height = height\n self.width = width\n # set number label\n self.numberLabel = Label(master, text=str(self.number))\n self.numberLabel.grid(row=self.height, columnspan=self.width)", "def generate_mine_map(width=30, height=16, num_mines=99):\n\n if num_mines > width * height:\n print(\"The number of mines exceeds the size of the board.\")\n return\n \n mine_map = [[False for i in range(width)] for j in range(height)]\n mines = 0\n while mines < num_mines:\n x = random.randint(0, width-1)\n y = random.randint(0, height-1)\n if not mine_map[y][x]:\n mine_map[y][x] = True\n mines += 1\n\n return mine_map", "def __init__(self, size, given_cells):\n self.ROWS = string.ascii_uppercase[:size ** 2]\n self.COLS = [str(i) for i in range(1, size ** 2)]\n self.size = size\n self.given_cells = given_cells\n self.board = self.create_board()\n self.squares = [utility.cross(i, j) for i in [self.ROWS[i:i + size] for i in range(0, len(self.ROWS), size)]\n for j in [self.COLS[i:i + size] for i in range(0, len(self.COLS), size)]]\n self.attach_neighbors()\n self.update_neighbor_values_by_given()\n print(\"Initial board:\")\n GUI.print_sudoku(self.board, self.size)", "def __init__(self, rows, cols, mines):\n tk.Tk.__init__(self)\n \n #load all needed images into Tile.images\n for i in range(14):\n Tile.images.append(tk.PhotoImage(file = \"images/tile-\"+str(i)+\".gif\"))\n \n self.menu = tk.Menu(self)\n self.configure(menu=self.menu)\n self.title(\"Minesweeper\")\n self.myBoard = Board(rows, cols, mines, self)\n self.menuVar = tk.IntVar(self)\n self.menuVar.set(1)\n self.checkVar = tk.IntVar(self)\n self.checkVar.set(1)\n self.gamemenu = tk.Menu(self.menu, tearoff = False)\n self.menu.add_cascade(label=\"Game\", menu=self.gamemenu)\n self.gamemenu.add_command(label=\"New Game\", command=self.myBoard.replay)\n self.gamemenu.add_separator()\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=1, label=\"Beginner\", command=lambda: self.resize(8,8,10))\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=2, label=\"Intermediate\", command=lambda: self.resize(16,16,40))\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=3, label=\"Expert\", command=lambda: self.resize(16,30,99))\n self.gamemenu.add_separator()\n self.gamemenu.add_checkbutton(variable = self.checkVar, onvalue=4, offvalue=0, label=\"Custom\", command= self.options)\n self.gamemenu.add_separator()\n self.gamemenu.add_command(label=\"Exit\", command=self.exitGame)\n windowWidth = str(20*cols+40)\n windowHeight = str(20*rows+60)\n self.protocol(\"WM_DELETE_WINDOW\", self.exitGame)\n self.minsize(windowWidth, windowHeight)\n self.maxsize(windowWidth, windowHeight)\n self.geometry(windowWidth+'x'+windowHeight)\n self.mainloop()", "def setUp(self):\r\n self.matrix = array(\r\n [[1, 2, 3, 
4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])\r\n self.cells = [(0, 1), (1, 3)]\r\n self.cells2 = [(0, 2), (2, 3)]", "def setUpBombs(self, event):\n pos = (event.widget.row * self.cols) + event.widget.col\n size = self.rows * self.cols\n \n #get a list random indexes in range to be mines\n mines = random.sample(range(size), self.numMines)\n if pos in mines:\n mines.remove(pos)\n temp = random.sample(range(size), 1)[0]\n while (temp == pos): temp = random.sample(range(size), 1)[0]\n mines.append(temp)\n \n #mark all mine squares as mines\n for mine in mines:\n targetRow = int(mine/self.cols)\n targetCol = mine % self.cols\n self.tiles[targetRow][targetCol].setMine()\n\n #calculate the number in each Square of the current game\n for row in self.tiles:\n for tile in row:\n if not tile.isMine():\n counter = 0\n for adjTile in self.getAdjacentTiles(tile.row,tile.col):\n if adjTile.isMine(): counter += 1\n tile.setCount(counter)\n \n self.minesArmed = True\n self.startTime = time.time()\n return 1", "def __init__ (self, cols = 6, rows = 7, requiredToWin = 4):\r\n\t\tself.cols = cols\r\n\t\tself.rows = rows\r\n\t\tself.win = requiredToWin\r\n\t\tself.board = [[NONE] * rows for _ in range(cols)]", "def setup_new_board(self):\n\n logger.info(u'setup_new_board()')\n\n self.squares = [[None for j in xrange(8)] for i in xrange(8)]\n \n self.black_checkers = [ch.Checker(u'black', self) for i in xrange(12)]\n self.white_checkers = [ch.Checker(u'white', self) for i in xrange(12)]\n\n u\"\"\" Place checkers in starting squares \"\"\"\n i = 0\n for row in xrange(3):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.white_checkers[i])\n i += 1\n\n i = 0\n for row in xrange(5, 8):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.black_checkers[i])\n i += 1", "def __init__(self, control_panel, width, height, percent_mined):\n\n super().__init__()\n\n # The following lines prevent an infinite loop at mine placement\n # if percent_mined is set too high.\n total_cells = width * height\n total_mines_allowed = total_cells - 9\n total_mines_requested = round(total_cells * percent_mined / 100)\n\n total_mines = min(total_mines_requested, total_mines_allowed)\n\n self.field = Minefield(total_mines)\n\n self.game_over = False\n self.win = False\n\n self.status_label = control_panel.status_label\n\n self.mine_count_label = control_panel.mine_count_label\n self._mines_left = 0\n self.mines_left = total_mines\n\n self.safe_count_label = control_panel.safe_count_label\n self._safes_left = 0\n self.safes_left = (width * height) - total_mines\n\n self.auto_solving = control_panel.auto_solving\n self.direction = control_panel.direction_panel.direction\n\n self.clear_queue = SuperQueue(self.field, color=\"clear_queue\", direction_var=self.direction)\n self.auto_queue = SuperQueue(self.field, color=\"auto_queue\", direction_var=self.direction)\n self.hyper_queue = SuperQueue(self.field, color=\"hyper_queue\", direction_var=self.direction)\n\n self.emphasis = {\n \"clear_queue\": control_panel.display_panel.clear_queue_settings,\n \"auto_queue\": control_panel.display_panel.auto_queue_settings,\n \"add_batch\": control_panel.display_panel.add_batch_settings,\n \"redundant\": control_panel.display_panel.redundant_settings,\n \"to_flag\": control_panel.display_panel.to_flag_settings,\n \"hyper_queue\": control_panel.display_panel.hyper_queue_settings,\n }\n\n # Create cells\n for x in range(width):\n for y in 
range(height):\n self.field[x, y] = Cell(master=self)\n self.field[x, y].grid(row=y, column=x)\n self.field[x, y].bind(\"<Button-1>\", lambda event, loc=(x, y): self.left_click(loc))\n self.field[x, y].bind(\"<Button-3>\", lambda event, loc=(x, y): self.toggle_flag(loc))", "def __init__(self):\n pygame.init()\n self.settings = Settings()\n self.number_cells_x = int(input(\"Enter number of cells in a row: \"))\n self.cell_width = float(self.settings.screen_width // self.number_cells_x)\n #print(self.cell_width)\n self.number_cells_y = int(self.settings.screen_height // self.cell_width)\n\n self.screen = pygame.display.set_mode((self.settings.screen_width,self.settings.screen_height))\n pygame.display.set_caption(\"Game of Life\")\n\n self.cells = []\n self.to_be_updated = []\n self._create_cells()\n\n self.bg_colour = (self.settings.bg_colour)\n self.waiting = True", "def __init__(self, size):\n self.size = size\n self.num_queens_placed = 0\n self.board = self.generate_board()", "def __init__(self):\n self._board_area = [[\" \" for i in range(20)] for j in range(20)]\n\n # Starting setup for board includes these coordinates black, and their mirror white\n black_start = [(1, 2), (2, 2), (2, 1), (2, 3), (3, 2), (4, 1), (4, 3), (5, 2), (6, 1), (6, 3), (7, 1),\n (7, 2), (7, 3), (8, 1), (8, 2), (8, 3), (9, 1), (9, 2), (9, 3), (10, 1), (10, 2), (10, 3),\n (11, 1), (11, 3), (12, 1), (12, 2), (12, 3), (13, 1), (13, 3), (14, 2), (15, 1), (15, 3),\n (16, 2), (17, 1), (17, 2), (17, 3), (18, 2), (2, 6), (5, 6), (8, 6), (11, 6),\n (14, 6), (17, 6)]\n\n # Border points set for clearing out stones that move beyond the border\n self._border = set((0, i) for i in range(20)) | set((19, i) for i in range(20))\n self._border = self._border | set((i, 0) for i in range(20)) | set((i, 19) for i in range(20))\n\n # Fill black and white stones\n for coord in black_start:\n self._board_area[coord[0]][coord[1]] = \"B\"\n self._board_area[coord[0]][-coord[1] - 1] = \"W\"\n\n # Alphabetic indexing of board for alpha-numeric movement inputs\n self._locmap = dict(zip(\"abcdefghijklmnopqrst\", range(20)))", "def add_mines(self):\n for x, y in sample(list(itertools.product(range(self.width), range(self.height))), self.num_mines):\n self.grid[y][x] = self.mine", "def test_generate_board_height_equal_to_width(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 20\n height = 20\n mines = int(width * height / 2)\n\n # act\n game.generate_board(width, height, mines)\n\n # assert\n self.assertEqual(width, len(game.board[0]), 'Board width incorrect.')\n self.assertEqual(height, len(game.board), 'Board height incorrect.')\n\n minesFound = (sum(game.board[row][col].is_mine for col in range(width)\n for row in range(height)))\n self.assertEqual(mines, minesFound,\n 'Wrong number of mines found.')", "def init_game(self):\n nrows = len(self.array)\n self.game_over = False\n self.squares_left = nrows * nrows\n self.bombs_left = 0\n # clear the board\n for i in xrange(nrows):\n for j in xrange(nrows):\n self.array[i][j].reset()\n # put N random bombs\n for i in xrange(nrows):\n rand_num = random.randrange(nrows*nrows)\n if self.array[rand_num / nrows][rand_num % nrows].type \\\n != SquareType.BOMB:\n self.insert_bomb(rand_num / nrows, rand_num % nrows)\n self.squares_left -= self.bombs_left\n self.print_board()", "def new_tile(self):\n zero_list = []\n zero_cell = ()\n # self._cells = [[0 for col in range(self._grid_width)] for row in range(self._grid_height)]\n for row in range(self._grid_height):\n for col in 
range(self._grid_width):\n if self._cells[row][col] == 0:\n zero_cell = (row, col)\n zero_list.append(zero_cell)\n if len(zero_list) > 0:\n chance = random.randrange(0,10)\n cell_idx = random.randrange(len(zero_list))\n if chance == 9:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 4\n else:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 2\n else:\n print(\"You lost! Better luck next time!\")", "def boardInit(self, width: int, height: int, bombs: int):\n # New board\n self.width = width\n self.height = height\n self.bombs = bombs\n self.board = minesweepergame.game(width, height, bombs)\n\n # New board items\n self.canvas.delete(*self.canvas.find_all())\n self.canvassquares: List[List[Optional[int]]] = [[None for y in range(\n self.board.height)] for x in range(self.board.width)] # Store the `_CanvasItemId`s\n self.canvasicons: List[List[Optional[int]]] = [[None for y in range(\n self.board.height)] for x in range(self.board.width)] # Store the `_CanvasItemId`s\n self.victoryMessage: Optional[int] = None\n\n # Render\n self.render()", "def __init__(self, width, height, walls = None):\r\n self.width = width\r\n self.height = height\r\n if walls:\r\n self.walls = walls\r\n else:\r\n self.walls = []\r\n self.goals = []\r\n self.tiles = []\r\n self._clear_map()", "def _generate_mines(self):\r\n mines_left = self.mines\r\n while mines_left > 0:\r\n gen_row = random.randint(0, self.rows-1)\r\n gen_col = random.randint(0, self.cols-1)\r\n\r\n if not self.fields[gen_row][gen_col].mine:\r\n self.fields[gen_row][gen_col].mine = True\r\n self._increment_fields_values(gen_row, gen_col)\r\n self.mines_cords.append((gen_row, gen_col))\r\n mines_left -= 1", "def test_generate_board_width_greater_than_height(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 19\n height = 10\n mines = int(width * height / 2)\n\n # act\n game.generate_board(width, height, mines)\n\n # assert\n self.assertEqual(width, len(game.board[0]), 'Board width incorrect.')\n self.assertEqual(height, len(game.board), 'Board height incorrect.')\n\n minesFound = (sum(game.board[row][col].is_mine for col in range(width)\n for row in range(height)))\n\n self.assertEqual(mines, minesFound,\n 'Wrong number of mines found.')", "def test_generate_board_too_many_mines_errors(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 10\n height = 12\n mines = int(width * height)\n\n # act and expect error\n with self.assertRaises(ValueError):\n game.generate_board(width, height, mines)", "def test_generate_board_height_greater_than_width(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 9\n height = 17\n mines = int(width * height / 2)\n\n # act\n game.generate_board(width, height, mines)\n\n # assert\n self.assertEqual(width, len(game.board[0]), 'Board width incorrect.')\n self.assertEqual(height, len(game.board), 'Board height incorrect.')\n\n minesFound = (sum(game.board[row][col].is_mine for col in range(width)\n for row in range(height)))\n\n self.assertEqual(mines, minesFound,\n 'Wrong number of mines found.')", "def __init__(self,m,n):\n self.columns = m\n self.rows = n\n self.board = makeBoard(m,n)", "def __init__(self):\n self.board = [[0 for i in range(9)]]*9\n self.board = [[0, 0, 0, 0, 3, 0, 9, 0, 0],\n [0, 0, 3, 0, 8, 0, 0, 0, 7],\n [6, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 5, 8, 3, 6, 0, 0, 0, 0],\n [0, 1, 0, 8, 9, 4, 0, 6, 0],\n [0, 0, 0, 0, 2, 7, 8, 4, 0],\n [0, 0, 9, 0, 0, 0, 0, 0, 8],\n [7, 0, 0, 0, 4, 0, 6, 0, 0],\n [0, 0, 5, 0, 1, 0, 0, 0, 0]]", "def __init__(self, 
n: int):\n self.size = n\n self.board = [[CellValues.EMPTY.value] * n for _ in range(n)]\n self.num_empty_cells = n * n", "def setUp(self):\n self.gameBoard = Grid((100, 100), Cell)", "def new_game(self):\n self.cells = [] # Array of cells\n self.frame_count = 0\n self.database = []\n self.timer = [Consts[\"MAX_TIME\"], Consts[\"MAX_TIME\"]]\n self.result = None\n # Define the players first\n self.cells.append(Cell(0, [Consts[\"WORLD_X\"] / 4, Consts[\"WORLD_Y\"] / 2], [0, 0], Consts[\"DEFAULT_RADIUS\"]))\n self.cells.append(Cell(1, [Consts[\"WORLD_X\"] / 4 * 3, Consts[\"WORLD_Y\"] / 2], [0, 0], Consts[\"DEFAULT_RADIUS\"]))\n # Generate a bunch of random cells\n for i in range(Consts[\"CELLS_COUNT\"]):\n if i < 4:\n rad = 1.5 + (random.random() * 1.5) # Small cells\n elif i < 10:\n rad = 10 + (random.random() * 4) # Big cells\n else:\n rad = 2 + (random.random() * 9) # Everything else\n x = Consts[\"WORLD_X\"] * random.random()\n y = Consts[\"WORLD_Y\"] * random.random()\n cell = Cell(i + 2, [x, y], [(random.random() - 0.5) * 2, (random.random() - 0.5) * 2], rad)\n safe_dist = Consts[\"SAFE_DIST\"] + rad\n while min(map(cell.distance_from, self.cells[:2])) < safe_dist:\n cell.pos = [\n Consts[\"WORLD_X\"] * random.random(),\n Consts[\"WORLD_Y\"] * random.random()\n ]\n self.cells.append(cell)", "def __init__(self, width=10, height=10, density=0.25):\n\t\tself.width = width\n\t\tself.height = height\n\t\t# create marks and mine field\n\t\tself.marks = [[CLOSED for _ in range(height)] for _ in range(width)]\n\t\tself.mines = [[random.random() < density for _ in range(height)] \n\t\t for _ in range(width)]", "def __init__(self, board_size=BOARD_SIZE, num_mines=NUM_MINES):\n\n self.board_size = board_size\n self.num_mines = num_mines\n self.board = place_mines(board_size, num_mines)\n self.my_board = np.ones((board_size, board_size), dtype=int) * CLOSED\n self.num_actions = 0\n\n self.observation_space = spaces.Box(low=-2, high=9,\n shape=(self.board_size, self.board_size), dtype=np.int)\n self.action_space = spaces.Discrete(self.board_size*self.board_size)\n self.valid_actions = np.ones((self.board_size * self.board_size), dtype=np.bool)", "def init():\n for i in range(COLS):\n for j in range(ROWS):\n BOARD[i][j] = int(random(2))", "def __init__(self):\n self.grid = {}\n for i in range(21):\n self.grid[i] = [' ']*21\n self._len_x = len(self.grid[0])\n self._len_y = len(self.grid)\n self.forbidden_tiles = []\n self.allowed_tiles = []\n self.exit = None\n self.entrance = None", "def init_board():\n\t# Generates a table 10*10 of 0s with -1 around and the initial state\n\t# of the board with 2 whites and 2 blacks in the middle\n\ttable = [[0 if i != 0 and i != 9 else -1 for i in range(10)] if j != 0 and j != 9 else [-1 for i in range(10)] for j in range(10)] #leaves a -1 line around the whole table of 0s\n\t#initial state is drawn and recorded\n\ttable[4][4] = 2\n\ttable[5][5] = 2\n\ttable[4][5] = 1\n\ttable[5][4] = 1\n\tdrawPiece((4,4),2)\n\tdrawPiece((5,5),2)\n\tdrawPiece((4,5),1)\n\tdrawPiece((5,4),1)\n\treturn table", "def new_game(self, width, height, minecount):\n self.start_screen.grid_forget()\n width, height, minecount = int(width), int(height), int(minecount)\n # always at least one non-mine square\n minecount = min(width*height-1, minecount)\n\n self.mineboard = Mineboard(\n width, height, minecount)\n self.change_display(width, height)\n self.scoreboard.grid(row=1, column=0, sticky='n')\n self.win_lose_lbl.grid_remove()\n self.start_time = time.time() # initial time for the game\n 
self.timer_update() # initial time display", "def __init__(self):\n self.maze = [['#','#','#','#','#','#','#','#','#','#','#',],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#','^','/',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ','@',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#','#','#','#','#','#','#','#','#','#','#'],\n ['#','#','#','#','#','#','#','#','#','#','#']]\n self.diamonds = 1\n self.width = 10\n self.height = 12\n self.crates = 1", "def __init__(self):\r\n self.rows = [[0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9]\r\n self.block1 = []\r\n self.block5 = []\r\n self.block9 = []\r\n self.puzzle = []\r\n self.score = 0\r\n self.difficulty = 1 # By default Easy difficulty\r\n\r\n \"\"\" Creating blocks using random number generator\"\"\"\r\n while len(self.block1) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block1:\r\n self.block1.append(r)\r\n\r\n while len(self.block5) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block5:\r\n self.block5.append(r)\r\n\r\n while len(self.block9) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block9:\r\n self.block9.append(r)\r\n x = 0\r\n for i in range(3):\r\n for j in range(3):\r\n self.rows[i][j] = self.block1[x]\r\n x = x+1\r\n x = 0\r\n for i in range(3, 6):\r\n for j in range(3, 6):\r\n self.rows[i][j] = self.block5[x]\r\n x = x+1\r\n x = 0\r\n for i in range(6,9):\r\n for j in range(6,9):\r\n self.rows[i][j] = self.block9[x]\r\n x = x+1\r\n \"\"\"Creating a valid solution\"\"\"\r\n self.createsolution(self.rows)", "def __init__(self, board_size=BOARD_SIZE, num_mines=NUM_MINES):\n\n self.board_size = board_size\n self.num_mines = num_mines\n self.board = place_mines(board_size, num_mines)\n self.my_board = np.ones((board_size, board_size), dtype=int) * CLOSED\n self.valid_actions = np.ones((self.board_size, self.board_size), dtype=np.bool)\n\n self.observation_space = spaces.Box(low=-2, high=9,\n shape=(self.board_size, self.board_size), dtype=np.int)\n self.action_space = spaces.MultiDiscrete([self.board_size, self.board_size])", "def __init__(self, gridSize):\n self.grid = []\n self.player1Pos = []\n self.player2Pos = []\n self.winner = None\n self.gridSize = gridSize\n self.generate()", "def initialize_screen(self):\r\n\r\n pygame.init()\r\n pygame.display.set_caption('Minesweeper')\r\n\r\n screen_width = max(display_params.RECT_SIZE * self.cols + 2 * display_params.MARGIN_SIDE,\r\n display_params.MIN_SCREEN_WIDTH)\r\n screen_height = display_params.RECT_SIZE * self.rows + display_params.MARGIN_TOP + \\\r\n display_params.MARGIN_BOTTOM\r\n self.screen = pygame.display.set_mode((screen_width, screen_height))\r\n self.screen.fill(colors.NAVYBLUE)\r\n\r\n pygame.display.update()", "def __init__(self, rows, columns, live_probability=0.3, seed=0):\n self.live_probability = live_probability\n self.seed = seed\n self.rows = rows\n self.columns = columns\n self.grid = [\n [Cell() for column_cells in range(self.columns)]\n for row_cells in range(self.rows)\n ]\n\n self.generate_board()", "def initialize_board():\n # Wipe current board\n for x in range(len(THE_BOARD.positions)):\n for y in range(len(THE_BOARD.positions)):\n THE_BOARD.positions[x][y] = ' '\n\n 
all_pieces = []\n\n # Pawns\n white_pawns = [Pawn('white', (6, i)) for i in range(len(THE_BOARD.positions[6]))]\n black_pawns = [Pawn('black', (1, i)) for i in range(len(THE_BOARD.positions[1]))]\n all_pieces.extend(white_pawns)\n all_pieces.extend(black_pawns)\n\n # Rooks\n rook1 = Rook('black', (0, 0))\n all_pieces.append(rook1)\n rook2 = Rook('black', (0, 7))\n all_pieces.append(rook2)\n rook3 = Rook('white', (7, 0))\n all_pieces.append(rook3)\n rook4 = Rook('white', (7, 7))\n all_pieces.append(rook4)\n\n # Knights\n knight1 = Knight('black', (0, 1))\n all_pieces.append(knight1)\n knight2 = Knight('black', (0, 6))\n all_pieces.append(knight2)\n knight3 = Knight('white', (7, 1))\n all_pieces.append(knight3)\n knight4 = Knight('white', (7, 6))\n all_pieces.append(knight4)\n\n # Bishops\n bishop1 = Bishop('black', (0, 2))\n all_pieces.append(bishop1)\n bishop2 = Bishop('black', (0, 5))\n all_pieces.append(bishop2)\n bishop3 = Bishop('white', (7, 2))\n all_pieces.append(bishop3)\n bishop4 = Bishop('white', (7, 5))\n all_pieces.append(bishop4)\n\n # King and Queen\n queen1 = Queen('black', (0, 4))\n all_pieces.append(queen1)\n queen2 = Queen('white', (7, 4))\n all_pieces.append(queen2)\n king1 = King('black', (0, 3))\n all_pieces.append(king1)\n king2 = King('white', (7, 3))\n all_pieces.append(king2)\n\n # Add every single piece to the board. Only then can they update their spaces threatened\n for piece in all_pieces:\n THE_BOARD.update(piece)\n THE_BOARD.update_all_spaces_threatened()", "def _generate_cells(self) -> None:\n for i in range(15):\n for j in range(15):\n c = Cell(x=i, y=j)\n c.answer = self.puzzle.solution[j*self.width+i]\n self.cells[(j, i)] = c # row, col", "def test_4x4_1mine_1():\r\n\r\n input = ('4 4\\n'\r\n '* . . .\\n'\r\n '. . . .\\n'\r\n '. . . .\\n'\r\n '. . . 
.\\n'\r\n '0 0')\r\n expected_output = ('Field #1:\\n'\r\n '* 1 0 0\\n'\r\n '1 1 0 0\\n'\r\n '0 0 0 0\\n'\r\n '0 0 0 0\\n'\r\n '\\n')\r\n expected_output = expected_output.replace('\\n', os.linesep)\r\n\r\n output = minesweeper_adaptor.run_minesweeper(input)\r\n assert output == expected_output", "def __init__(self, grid_size, num_pokemon):\n self._game_board = UNEXPOSED * (grid_size ** 2)\n self._num_pokemon = num_pokemon\n self._pokemon_location = self.generate_pokemons(grid_size)", "def __init__(self, rows=9, columns=26):\n self.rows = rows\n self.columns = columns\n self.grid = [[Cell(j, i, constants.EMPTY_CELL_MARK) for i in range(rows)] for j in range(\n columns)]\n self.total_ships = 0\n self.active_ships = 0\n self.destroyed_ships = 0", "def __init__(self):\n # The starting counts are set to 0 and modified when the board is initiated.\n self.num_black_pieces = 0\n self.num_black_kings = 0\n self.num_white_pieces = 0\n self.num_white_kings = 0\n # Creates a new board and fills it with the appropriate pieces.\n self.board = self._initiate_board()\n self.moves = []", "def setup(self):\n self.board[(3, 3)] = -1\n self.board[(3, 4)] = -1\n self.board[(4, 3)] = 1\n self.board[(4, 4)] = 1\n\n self.stones_set = 4", "def populate_board(self):\n for row in range(10):\n for col in range(10):\n coord = Coordinate(row, col)\n coord_attack = Coordinate(row, col)\n self.player_table.setItem(row, col, coord)\n self.attack_table.setItem(row, col, coord_attack)", "def initialize_board(self):\n self.board = np.zeros(shape=(BOARD_SIZE, BOARD_SIZE), dtype=np.int) # another way of defining board: [[for x in range(cm.BOARD_SIZE)] for x in range(cm.BOARD_SIZE)]\n center = int(BOARD_SIZE / 2)\n self.board[center-1][center-1] = self.board[center][center] = WHITE # place the board according to position\n self.board[center][center-1] = self.board[center-1][center] = BLACK\n self.black_piece = 2\n self.white_piece = 2", "def set_mines(field):\n mines = 0\n while mines < MINES_QUANTITY:\n x, y = get_random_coord(FIELD_SIZE), get_random_coord(FIELD_SIZE)\n if field[x][y] != EMPTY_CELL:\n continue\n field[x][y] = MINE\n mines += 1\n return field", "def __init__(self,height,width,prob):\r\n self.height = height\r\n self.width = width\r\n\r\n self.prob = prob\r\n\r\n # check if prob between 0 and 1\r\n if prob < 0 or prob > 1:\r\n raise ValueError('prob should between 0 and 1')\r\n\r\n # create a uniform distribution matrix\r\n self.board = np.random.uniform(low=0,high=1,size=[height,width])\r\n # generate board matrix, '0':empty '-1':mines\r\n self.board = -(self.board < prob).astype(int)\r\n self.countmineboard = self.board.copy()\r\n self.minenum = len(self.board[(self.board == -1)])\r\n self.count_mines()", "def __init__(self):\n self.board_dict = dict()\n for i in range(self.BOARD_WIDTH):\n for j in range(self.BOARD_WIDTH):\n self.board_dict[i, j] = 0, None\n\n self.players_locations = dict()\n self.last_moved = None", "def initialize_board(self):\n seed = self.seed and self.seed.any()\n if not (self.shape or seed):\n raise Exception(\"Either a shape or a seed is required.\")\n\n elif self.shape and seed:\n # Center the seed on a game board\n board = self._center_seed(self.shape, self.seed)\n\n elif self.shape:\n # The probability a cell starts off dead\n prob_dead = [1 - self.game.weight]\n # Class probabilities for live cells\n probs_alive = [self.game.weight * (1/self.classes)] * self.classes\n\n board = np.random.choice(\n self.classes + 1,\n np.prod(self.shape),\n p = prob_dead + probs_alive\n 
).reshape(self.shape)\n \n else: # Only a seed is given\n self.shape = self.seed.shape\n board = self.seed\n\n self.array = board\n self.start_array = board\n self.prev_array = None", "def create_board(self):\n # # empty 7x7 board\n # board = [[list() for x in range(7)] for y in range(7)]\n # # coordinates of starting marbles\n # black = [[0, 0], [1, 0], [1, 1], [0, 1], [6, 6], [6, 5], [5, 5], [5, 6]]\n # white = [[6, 0], [6, 1], [5, 1], [5, 0], [0, 6], [0, 5], [1, 5], [1, 6]]\n # red = [[1, 3], [2, 2], [2, 3], [2, 4], [3, 1], [3, 2], [3, 3], [3, 4], [3, 5], [4, 2], [4, 3], [4, 4], [5, 3]]\n # for marble in white:\n # board[marble[0]][marble[1]] = \"B\"\n # for marble in black:\n # board[marble[0]][marble[1]] = \"W\"\n # for marble in red:\n # board[marble[0]][marble[1]] = \"R\"\n # return board\n pass", "def __init__(self, height, width):\n self.height, self.width = height, width\n self.board = self.create_board_matrix(height, width)\n self.refresh_rate = 0.3\n self.points = 0 # pieces successfully added\n self.level = 1", "def __init__(self, grid):\n self.grid = grid\n (self.H, self.W) = self.grid.shape\n\n # Store the empty cells to simplify `random_state`\n self.empty_cells = set()\n for y, row in enumerate(grid):\n for x, is_wall in enumerate(row):\n if not is_wall:\n self.empty_cells.add((x, y))\n # Getting random empty cells uses a list.\n self.empty_cell_list = list(self.empty_cells)", "def __init__(self, rows=6, columns=7, win_length=4):\n\n self._board = [[0 for i in xrange(columns)] for i in xrange(rows)]\n self._rows = rows\n self._columns = columns\n self._win_length = win_length\n self.current_player = None\n self.winner = None\n print \"The game is afoot!\"", "def set_entries(row, col, mines):\n rows_amount.set(row)\n cols_amount.set(col)\n mine_amount.set(mines)", "def place_mines(board_size, num_mines):\n mines_placed = 0\n board = np.zeros((board_size, board_size), dtype=int)\n while mines_placed < num_mines:\n rnd = randint(0, board_size * board_size)\n x = int(rnd / board_size)\n y = int(rnd % board_size)\n if is_valid(x, y):\n if not is_mine(board, x, y):\n board[x, y] = MINE\n mines_placed += 1\n return board", "def __init__(self):\n\n self._length = 8\n self.board = []\n self.columns = \"ABCDEFGH\"\n for colNum in range(0, self._length):\n self.board.append([])\n for rowNum in range(0, self._length):\n self.board[colNum].append(Tile(colNum, rowNum))\n\n self.board[3][3].color = \"blue\"\n self.board[3][4].color = \"red\"\n self.board[4][3].color = \"red\"\n self.board[4][4].color = \"blue\"", "def test_4x4_1mine_2():\r\n\r\n input = ('4 4\\n'\r\n '. . . .\\n'\r\n '. . . .\\n'\r\n '. * . .\\n'\r\n '. . . 
.\\n'\r\n '0 0')\r\n expected_output = ('Field #1:\\n'\r\n '0 0 0 0\\n'\r\n '1 1 1 0\\n'\r\n '1 * 1 0\\n'\r\n '1 1 1 0\\n'\r\n '\\n')\r\n expected_output = expected_output.replace('\\n', os.linesep)\r\n\r\n output = minesweeper_adaptor.run_minesweeper(input)\r\n assert output == expected_output", "def __init__(self, board=None, workers=None):\n if board:\n self._board = []\n for row in range(self.BOARD_SIZE):\n self._board.append([])\n for col in range(self.BOARD_SIZE):\n try:\n height = board[row][col]\n except IndexError:\n height = 0\n self._board[row].append(Building(height))\n else:\n self._board = [[Building() for col in range(self.BOARD_SIZE)]\n for row in range(self.BOARD_SIZE)]\n\n if workers:\n self._workers = workers\n else:\n self._workers = {}", "def draw_minefield(self):\n # Resets member variables to initial values\n self.is_initialized = False\n self.is_game_over = False\n self.mines[:, :] = False\n self.mines_count[:, :] = 0\n self.flags[:, :] = False\n self.revealed[:, :] = False\n\n # Clears plot, sets limits\n self.ax.clear()\n self.ax.set_aspect('equal')\n self.ax.axis('off')\n self.ax.set_xlim(-.6, self.width - .4)\n self.ax.set_ylim(-.6, self.height - .4)\n\n # Draws grid lines\n for j in np.arange(-.5, self.width):\n self.ax.plot([j, j], [-.5, self.height-.5], lw=1, color='k')\n for i in np.arange(-.5, self.height):\n self.ax.plot([-.5, self.width-.5], [i, i], lw=1, color='k')\n\n # Connects mouse click and key press event handlers and coordinates formatter\n if self.cid_mouse is None:\n self.cid_mouse = self.fig.canvas.mpl_connect('button_press_event', self.on_mouse_click)\n self.cid_key = self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)\n self.ax.format_coord = _CoordsFormatter(self.width, self.height)\n\n # Title text: number of flags/total mines\n self.title_txt = self.ax.set_title(\n '{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))\n\n self.refresh_canvas()", "def __init__(self):\n self.game_board = [' '] * 9\n self.size = len(self.game_board)\n self.move = 'X'\n self.player1 = None\n self.player2 = None\n self.current_player = None\n self.board_coords = {\n (1, 3): 0, (2, 3): 1, (3, 3): 2,\n (1, 2): 3, (2, 2): 4, (3, 2): 5,\n (1, 1): 6, (2, 1): 7, (3, 1): 8\n }\n\n self.winning_cases = [\n (0, 1, 2), (3, 4, 5), (6, 7, 8),\n (0, 3, 6), (1, 4, 7), (2, 5, 8),\n (0, 4, 8), (2, 4, 6)\n ]", "def init(self, windowsize:tuple):\r\n y_count, x_count = 3, 0 #< Set the starting counter for the look_up_table. 
y starts with three because the first three lines are just Nones\r\n # Creating the constant maze \r\n maze_size = windowsize[0], windowsize[1] - 2 * self.grid_size\r\n self.maze = pg.Surface(maze_size) \r\n \r\n \r\n \r\n # Draw the outermost rectangles on self.maze\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((0, 3 * self.grid_size), (28 * self.grid_size, 31 * self.grid_size)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((0 + self.grid_size // 2, 3 * self.grid_size + self.grid_size // 2),(27 * self.grid_size, 30 * self.grid_size)), 4) \r\n # Draw the inner rectangles\r\n for y in self.look_up_table[3 : -2]: #< y is a list of one row from the maze\r\n for x in y: #< x is a string that is decoded as already explained\r\n pos = [self.grid_size * x_count, self.grid_size * y_count]\r\n # Set reference position in the middle of one square\r\n pos[0] += self.grid_size // 2\r\n pos[1] += self.grid_size // 2\r\n x_count += 1\r\n # Check if x is rectangle\r\n if x != None and x[0] == 'r':\r\n # When the size of the string is equal or greater than 4 it's rectangle with a specific size and not just a border.\r\n if len(x) >= 4:\r\n # get the x and y size of the rectangle. x will be something like 'rx1_y1' x1 resprestens the size in x direction and y1 in y direction.\r\n xy_dim = x[1:].split(\"_\") \r\n xy_dim[0] = int(xy_dim[0])\r\n xy_dim[1] = int(xy_dim[1])\r\n rect = tuple(pos), (xy_dim[0] * self.grid_size , xy_dim[1] * self.grid_size )\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], rect, self.width)\r\n # If the last char is a w (white), u (up) or l (left) a line gets draw one a specific position \r\n if x[-1] == 'w':\r\n self.draw_line(self.maze, 'u', (x_count,y_count), True)\r\n if x[-1] == 'u' or x[-1] == 'l':\r\n if x_count == 0:\r\n self.draw_line(self.maze, x[-1], (len(y), y_count))\r\n else:\r\n self.draw_line(self.maze, x[-1], (x_count, y_count))\r\n \r\n y_count += 1\r\n x_count = 0\r\n # Just some cosmetic drawing\r\n pg.draw.rect(self.maze, Colors.colors['BLACK'], ((0, 12 * self.grid_size + self.grid_size // 2 + 4), (self.grid_size // 2 + 1, 10 * self.grid_size - 4)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLACK'], ((28 * self.grid_size - self.grid_size // 2 - 1, 12 * self.grid_size + self.grid_size // 2 + 4), (self.grid_size // 2 + 1, 10 * self.grid_size - 4)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((-self.width, 13 * self.grid_size), (5 * self.grid_size, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((-self.width, 19 * self.grid_size), (5 * self.grid_size, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((23 * self.grid_size, 13 * self.grid_size), (5 * self.grid_size + 10, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((23 * self.grid_size, 19 * self.grid_size), (5 * self.grid_size + 10, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((11 * self.grid_size, 16 * self.grid_size), (6 * self.grid_size, 3 * self.grid_size)), self.width)\r\n \r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (0, 16 * self.grid_size + self.grid_size // 2 - 1), (self.grid_size // 2 + self.width, 16 * self.grid_size + self.grid_size // 2 - 1), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (0, 18 * self.grid_size + self.grid_size // 2), (self.grid_size // 2 + self.width, 18 * self.grid_size + self.grid_size // 2), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (self.grid_size * 28 
- self.grid_size, 16 * self.grid_size + self.grid_size // 2 - 1), (self.grid_size * 28 + self.width, 16 * self.grid_size + self.grid_size // 2 - 1), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (self.grid_size * 28 - self.grid_size, 18 * self.grid_size + self.grid_size // 2), (self.grid_size * 28 + self.width, 18 * self.grid_size + self.grid_size // 2), self.width)\r\n self.is_init = True", "def create(self):\n\n for i in range(8):\n # Create white pawns\n self.board[1][i] = Piece(\"pawn\", 1, i, 0)\n # Create black pawns\n self.board[6][i] = Piece(\"pawn\", 6, i, 1)\n\n # Create white rooks\n self.board[0][0] = Piece(\"rook\", 0, 0, 0)\n self.board[0][7] = Piece(\"rook\", 0, 7, 0)\n\n # Create black rooks\n self.board[7][0] = Piece(\"rook\", 7, 0, 1)\n self.board[7][7] = Piece(\"rook\", 7, 7, 1)\n\n # Create white knights\n self.board[0][1] = Piece(\"knight\", 0, 1, 0)\n self.board[0][6] = Piece(\"knight\", 0, 6, 0)\n\n # Create black knights\n self.board[7][1] = Piece(\"knight\", 7, 1, 1)\n self.board[7][6] = Piece(\"knight\", 7, 6, 1)\n\n # Create white bishop\n self.board[0][2] = Piece(\"bishop\", 0, 2, 0)\n self.board[0][5] = Piece(\"bishop\", 0, 5, 0)\n\n # Create black bishop\n self.board[7][2] = Piece(\"bishop\", 7, 2, 1)\n self.board[7][5] = Piece(\"bishop\", 7, 5, 1)\n\n # Create white queen and king\n self.board[0][3] = Piece(\"queen\", 0, 3, 0)\n self.board[0][4] = Piece(\"king\", 0, 4, 0)\n\n # Create black queen and king\n self.board[7][3] = Piece(\"queen\", 7, 3, 1)\n self.board[7][4] = Piece(\"king\", 7, 4, 1)", "def __init__(self):\r\n # Screen settings\r\n self.screen_width = 990\r\n self.screen_height = 990\r\n self.bg_color = (115, 204, 0)\r\n self.player_speed = 30\r\n self.enemy_speed = 45\r\n self.bomb_width = 90\r\n self.bomb_height = 90\r\n self.bomb_color = (96,96,96)\r\n self.max_bombs = 1\r\n self.bomb_radius = 45\r\n self.color_1 = (200, 200, 200)\r\n self.color_2 = (0, 0, 0)\r\n self.row_width = self.screen_width / 11\r\n self.col_width = self.screen_width / 11\r\n\r\n self.red_points = 0\r\n self.blue_points = 0\r\n\r\n self.wall_types = {\r\n 'wall': 1,\r\n 'barell': 2,\r\n }", "def update_cells(self):\n mineboard = self.mineboard\n gameboard = mineboard.gameboard\n for change in mineboard.changes:\n i, j = change[0], change[1]\n text_val = gameboard[i][j]\n\n if text_val == 'M':\n self.canvas.delete(self.cells[i][j])\n self.cells[i][j] = self.canvas.create_image(\n 2+j*CELLWIDTH, 2+i*CELLWIDTH, image=EXPLODED, anchor='nw')\n self.reveal_mines(i, j)\n\n elif text_val == 'F':\n self.canvas.delete(self.cells[i][j])\n self.cells[i][j] = self.canvas.create_image(\n 2+j*CELLWIDTH, 2+i*CELLWIDTH, image=FLAG, anchor='nw')\n\n elif text_val == ' ':\n self.canvas.delete(self.cells[i][j])\n self.cells[i][j] = self.canvas.create_rectangle(\n 2+j*CELLWIDTH, 2+i*CELLWIDTH, (j+1)*CELLWIDTH, (i+1)*CELLWIDTH, fill=DEFAULT_COLOR, outline=\"\")\n\n elif text_val in ['0', '1', '2', '3', '4', '5', '6', '7', '8']:\n self.canvas.itemconfig(\n self.cells[i][j], fill=COLORS[int(text_val)])\n if text_val != '0':\n # offset here is by 12 pixels\n self.canvas.create_text(\n 2+j*CELLWIDTH+(CELLWIDTH-1)//2, 2+i*CELLWIDTH+(CELLWIDTH-1)//2, anchor='center', text=f\"{text_val}\")\n\n mineboard.changes = [] # removes previous changes\n if mineboard.gamestate is not None:\n # if the game has ended displays game end message and buttons\n self.win_lose_lbl.grid(row=3, column=0, columnspan=4)\n self.win_lose_msg.set(\n f\"You {self.mineboard.gamestate}! 
Play again?\")\n self.same_again_bttn.grid(row=4, column=0, columnspan=2)\n self.play_again_bttn.grid(row=4, column=2, columnspan=2)", "def __init__(self, window: pg.Surface):\n self.window = window\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.maximum_obstacles_on_board = 10\n self.obstacles = self.create_obstacles()", "def new_tile(self):\r\n # replace with your code\r\n empty_square_lists = []\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if(self.get_tile(row, col) == 0):\r\n empty_square_lists.append((row, col))\r\n \r\n if len(empty_square_lists) == 0:\r\n return \"game over!\"\r\n \r\n random_cell = random.choice(empty_square_lists)\r\n random_cell_row = random_cell[0]\r\n random_cell_col = random_cell[1]\r\n \r\n values = [2] * 90 + [4] * 10\r\n value = random.choice(values)\r\n \r\n self.set_tile(random_cell_row, random_cell_col, value)", "def reset(self):\r\n # replace with your code\r\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\r\n self.new_tile()\r\n self.new_tile()", "def initialize(self, i, j):\n population = set(range(self.n))\n population.remove(i*self.width + j) # removes initial click\n idx = random.sample(population, self.n_mines) # choose mines\n\n # Sets mines\n self.mines[self.i[idx], self.j[idx]] = True\n # Sets neighbor mines counter\n for i, j in zip(self.i, self.j):\n self.mines_count[i, j] = self.count_neighbor_mines(i, j)\n # Sets wrong guesses\n self.wrong = ~self.mines & self.flags\n\n # Initializes plot objects\n self.flags_pts, = self.ax.plot([], [], 'k>', ms=8)\n self.revealed_img = self.ax.imshow(self.revealed, vmin=0, vmax=4, cmap='gray_r')\n self.wrong_img = self.ax.imshow(self.wrong, vmin=0, vmax=1, cmap=self.cmap_reds_alpha)\n\n # Initializes text objects of neighbor mines counter. They're\n # initially set as non visible. 
As the cells are revealed, their\n # status is changed to visible\n p_count = self.mines_count > 0\n for i, j, count in zip(self.ii[p_count], self.jj[p_count], self.mines_count[p_count]):\n self.mines_count_txt[i, j] = self.ax.text(j, i, str(count), fontweight='bold',\n color=self.color_dict[count], ha='center',\n va='center', visible=False)\n self.is_initialized = True\n\n self.refresh_canvas()", "def reset(self):\r\n # creating the grid with the values all initialized to zero\r\n \r\n self._grid = [[ 0 for dummy_col in range(self._width)]\r\n for dummy_row in range(self._height)]\r\n # introducing the two initial tiles\r\n self.new_tile()\r\n self.new_tile()\r\n #for testing purposes\r\n #print self.grid\r\n #print self\r", "def start_new_game(self):\r\n\r\n self.initialize_game_params()\r\n self.timer = Timer(self.screen)\r\n self.mine_counter = MineCounter(self.num_of_mines, self.screen)\r\n self.reset_button = ResetButton(self.screen)\r\n self.high_score = HighScore(self.rows, self.cols, self.num_of_mines, self.screen)\r\n self.board = Board(self.rows, self.cols, self.num_of_mines, self.screen)\r\n self.play_game()", "def __init__(self, num_players):\n self.num_players = num_players\n self.firework = [[], [], [], [], []]\n self.nb_blue_stone = MAX_BLUE_STONE\n self.nb_red_stone = MAX_RED_STONE\n self.draw = None\n self.hands = None\n self.fill_draw()\n random.shuffle(self.draw)\n self.discard = []\n self.draw_initial_hands()", "def __initialize_grid_dimensions(self) -> None:\n if len(self.players) > 3:\n self.num_columns = len(self.players)\n self.num_rows = self.num_cells // len(self.players)\n else:\n self.num_columns = self.num_cells // len(self.players)\n self.num_rows = len(self.players)\n self.grid_handler = GridHandler(self.num_columns, self.num_rows)", "def reset(self) -> None:\n self.map = []\n for col in range(self.width):\n self.map.append([])\n for cell in range(self.height):\n if col > 1 and col < self.width - 2:\n if cell == 0:\n # World Barrier - Top Middle\n self.map[col].append(StaticTile('wall_3', self.graphicsLibrary.get('wall_3'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif cell == self.height - 1:\n # World Barrier - Bottom Middle\n self.map[col].append(StaticTile('wall_12', self.graphicsLibrary.get('wall_12'), (self.scaleWidth,self.scaleHeight), barrier=True))\n else:\n # Playable Map Area\n if (col % 2) != 0 and (cell % 2) == 0:\n # Hard-Barrier Generation\n self.map[col].append(StaticTile('solid', self.graphicsLibrary.get('solid'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif (col,cell) in self.spawn_buffers:\n # Preserve Potential Spawn Points\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n elif random.randint(0, 2) == 0:\n # Soft-Barrier Generation\n self.map[col].append(DynamicTile('destructable_new', self.graphicsLibrary.get('destructable_new'), (self.scaleWidth,self.scaleHeight), destructable=\"True\", barrier=True, death_animation=self.animations_library.get('destructable_death')))\n else:\n # Fill Remaining Terrain\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n else:\n # World Barrier - Side Sections\n if col == 0 or col == self.width - 1:\n # Roof\n right_most_columns = False\n if col == self.width - 1:\n right_most_columns = True\n\n if cell == self.height - 1:\n self.map[col].append(StaticTile('wall_10', self.graphicsLibrary.get('wall_10'), 
(self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_5', self.graphicsLibrary.get('wall_5'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif col == 1 or col == self.width - 2:\n # Floor \n right_most_columns = False\n if col == self.width - 2:\n right_most_columns = True\n\n if cell == self.height -1:\n self.map[col].append(StaticTile('wall_11', self.graphicsLibrary.get('wall_11'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n self.map[col].append(StaticTile('wall_9', self.graphicsLibrary.get('wall_9'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_2', self.graphicsLibrary.get('wall_2'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 1:\n self.map[col].append(StaticTile('wall_6', self.graphicsLibrary.get('wall_6'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_7', self.graphicsLibrary.get('wall_7'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n self.map[col][cell].place_at(topleft=(self.scaleWidth * col, self.scaleHeight * cell))", "def __init__(self, n):\n self.rows = [0 for _ in range(n)]\n self.columns = [0 for _ in range(n)]\n # First diagonal x+y, second y-x\n self.diagonal = [0, 0]\n self.score = {1: 1, 2: n+1}\n self.win = {1: n, 2: (n+1)*n}\n self.size = n", "def _generate_maze(self):\n grid = [[GridCell(x, y, self._treasure_prob) for x in range(self._map_size)] for y in range(self._map_size)]\n\n center_x = self._map_size // 2\n center_y = self._map_size // 2\n\n for _ in range(self._sparsity):\n current = grid[center_x][center_y]\n stack = list()\n start = True\n while len(stack) or start:\n start = False\n current.visited = True\n children = current.has_children(grid)\n\n if children:\n choice = np.random.choice(children)\n choice.visited = True\n\n stack.append(current)\n\n self._remove_walls(current, choice)\n\n current = choice\n\n elif stack:\n current = stack.pop()\n for row in grid:\n for cell in row:\n cell.visited = False\n\n # edit center area\n grid[center_x][center_y].set_treasury()\n for x in range(center_x - 1, center_x + 2):\n for y in range(center_y - 1, center_y + 2):\n grid[x][y].erase_walls()\n return grid", "def __init__(self):\n self.rows = None\n self.columns = None\n self.squares = None\n # max is useful as a way to track range for iteration, and also as a way\n # to track the maximum number in any spot.\n self.max = 0", "def __init__(self, width, height):\n roomDict = {}\n for w in range(width):\n for h in range(height):\n roomDict[Position(w, h)] = 'dirty'\n self.tiles = roomDict\n self.width = width\n self.height = height", "def test_4x4_no_mine():\r\n\r\n input = ('4 4\\n'\r\n '. . . .\\n'\r\n '. . . .\\n'\r\n '. . . .\\n'\r\n '. . . 
.\\n'\r\n '0 0')\r\n expected_output = ('Field #1:\\n'\r\n '0 0 0 0\\n'\r\n '0 0 0 0\\n'\r\n '0 0 0 0\\n'\r\n '0 0 0 0\\n'\r\n '\\n')\r\n expected_output = expected_output.replace('\\n', os.linesep)\r\n\r\n output = minesweeper_adaptor.run_minesweeper(input)\r\n assert output == expected_output", "def new_tile(self):\n \n empty_items = []\n for row in range(self.get_grid_height()):\n for col in range(self.get_grid_width()):\n if self.get_tile(row, col) == 0:\n empty_items.append((row, col))\n \n random_row = 0\n random_col = 0\n if len(empty_items) != 0:\n random_empty_tile = random.randrange(0, len(empty_items))\n (random_row, random_col) = empty_items[random_empty_tile]\n else:\n return\n # the % of getting \"4\" from 0~9 is 10%\n random_time = random.randrange(0, 10)\n \n if random_time == 4:\n self._cells[random_row][random_col] = 4\n else:\n self._cells[random_row][random_col] = 2", "def test_generate_board_negative_mines_errors(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 10\n height = 12\n mines = -1\n\n # act and expect error\n with self.assertRaises(ValueError):\n game.generate_board(width, height, mines)", "def _create_cells(self):\n\t\tcellId=0\n\t\t# Iterate over all dictionaries\n\t\tfor muscle,muscAfferentDelay in self._infoMuscles:\n\t\t\tfor cellInfo in self._infoCommonCellsInMuscles:\n\t\t\t\tcellClass = cellInfo[0]\n\t\t\t\tcellName = cellInfo[1]\n\t\t\t\tcellNumber = cellInfo[2]\n\t\t\t\tif len(cellInfo)>=4: neuronParam = cellInfo[3]\n\t\t\t\telse: neuronParam = None\n\t\t\t\tcellId = self._create_cell_population(cellId,muscle,muscAfferentDelay,cellClass,cellName,cellNumber,neuronParam)\n\t\t# Add special cells\n\t\tfor cellInfo in self._infoSpecialCells:\n\t\t\tgroupOrMuscle = cellInfo[0]\n\t\t\tcellClass = cellInfo[1]\n\t\t\tcellName = cellInfo[2]\n\t\t\tcellNumber = cellInfo[3]\n\t\t\tif len(cellInfo)>=5: neuronParam = cellInfo[4]\n\t\t\telse: neuronParam = None\n\t\t\tmuscAfferentDelay = None\n\t\t\tcellId = self._create_cell_population(cellId,groupOrMuscle,muscAfferentDelay,cellClass,cellName,cellNumber,neuronParam)\n\n\t\tself._motoneuronsNames = self._intMotoneuronsNames+self._realMotoneuronsNames\n\t\tself._afferentsNames = self._primaryAfferentsNames+self._secondaryAfferentsNames", "def __init__(self, n):\n self.matrix = [[0 for i in range(n)] for j in range(n)]\n self.winning = False", "def init_place(self):\n for i in range(self.numCells):\n x = randint(0,self.nx)\n y = randint(0,self.ny)\n while not self.is_empty(x,y):\n x = randint(0, self.nx)\n y = randint(0, self.ny)\n assert self.put_cell(x, y, i) is True\n self.cells.append(Cell(x,y))\n\n assert self.calc_cost() is True", "def __init__(self):\n self.board = {} # dict of (x,y) to PlacedTile\n self.board[(0,0)] = STARTING_PIECE", "def __init__(self, width=7, height=6):\n self.width = width\n self.height = height\n self.board = self.createBoard()", "def test_4x4_16mines():\r\n\r\n input = ('4 4\\n'\r\n '* * * *\\n'\r\n '* * * *\\n'\r\n '* * * *\\n'\r\n '* * * *\\n'\r\n '0 0')\r\n expected_output = ('Field #1:\\n'\r\n '* * * *\\n'\r\n '* * * *\\n'\r\n '* * * *\\n'\r\n '* * * *\\n'\r\n '\\n')\r\n expected_output = expected_output.replace('\\n', os.linesep)\r\n\r\n output = minesweeper_adaptor.run_minesweeper(input)\r\n assert output == expected_output", "def __init__(self,height,width): \r\n self.width = 2*(width//2) + 1 # Make width odd\r\n self.height = 2*(height//2) + 1 # Make height odd\r\n\r\n # grid of cells\r\n self.grid = [[0 for j in range(self.width)] for i in 
range(self.height)]\r\n\r\n # declare instance variable\r\n self.visited = [] # visited cells\r\n self.unvisited = [] # unvisited cells\r\n self.path = dict() # random walk path\r\n\r\n # valid directions in random walk\r\n self.directions = [(0,1),(1,0),(0,-1),(-1,0)]\r\n\r\n # indicates whether a maze is generated\r\n self.generated = False\r\n\r\n # shortest solution\r\n self.solution = []\r\n self.showSolution = False\r\n self.start = (0,0)\r\n self.end = (self.height-1,self.width-1)", "def __init__(self, board_size, ships):\r\n self.__board_size = board_size\r\n self.__ships = ships\r\n self.__bombs_dict = dict()", "def init_board(self) -> None:\n\t\tself.canvas.create_rectangle(0, 0, self.canvas_width, self.canvas_height, fill=self.color_background)\n\t\tfor x in range(0, self.canvas_width, self.canvas_width//self.board_size):\n\t\t\tself.canvas.create_line(x, 0, x, self.canvas_width, fill=self.color_tile_border)\n\n\t\tfor y in range(0, self.canvas_width+1, self.canvas_height//self.board_size):\n\t\t\tself.canvas.create_line(0, y, self.canvas_height, y, fill=self.color_tile_border)\n\n\t\tself.text_area.delete('0.1', '2.1')" ]
[ "0.72239524", "0.71592915", "0.7094593", "0.6917509", "0.6810791", "0.67366654", "0.67292786", "0.6627893", "0.6587037", "0.65448916", "0.65435743", "0.6459289", "0.6438166", "0.64203", "0.6411018", "0.640888", "0.6347741", "0.6320402", "0.62783587", "0.6267229", "0.62347484", "0.62306005", "0.6209381", "0.615682", "0.6155103", "0.61526054", "0.6132393", "0.61315066", "0.61267287", "0.61183393", "0.61150557", "0.6093923", "0.6087944", "0.6053407", "0.6053246", "0.60337806", "0.6032544", "0.6028984", "0.6014753", "0.60145396", "0.6013252", "0.60078937", "0.599", "0.59457344", "0.5944524", "0.59421927", "0.594049", "0.59324676", "0.5930383", "0.59242404", "0.5914062", "0.59118485", "0.5907466", "0.5906046", "0.5888942", "0.58844924", "0.58813864", "0.5878171", "0.5875072", "0.5867864", "0.58674204", "0.5867347", "0.58529377", "0.584781", "0.58447886", "0.58345133", "0.5833485", "0.58308655", "0.5826475", "0.58236647", "0.58216864", "0.58198607", "0.58196986", "0.5818121", "0.5809303", "0.5807936", "0.58073467", "0.5806609", "0.58058625", "0.5802266", "0.5797999", "0.5789294", "0.57890826", "0.57854235", "0.5775142", "0.5770647", "0.5759158", "0.57580376", "0.5747911", "0.5746493", "0.5742194", "0.5739855", "0.5727275", "0.57265943", "0.5718112", "0.57175124", "0.5709945", "0.56997633", "0.56942004", "0.56938815" ]
0.715291
2
Prints the table, regardless of whether it's a game state table or the answer table.
def print_table(table, exploded_at=[-1, -1]): # color codes just to look pretty NORMAL = '\33[10m' BLUE_START = '\33[104m' RED_START = '\33[31m' PURPLE_START = '\33[35m' GREEN_START = '\33[92m' ORANGE_START = '\33[93m' END = '\033[0m' s = ' %s' % BLUE_START # print number headers along x-axis for i in range(0, width): s += " %s" % i if i < 10: s += " " * 2 else: s += " " s += "%s\n" % END # print letters for y-axis, + the relevant values in each coordinate # depending on table. for y in range(0, height): s += "%s %s %s \t" % (BLUE_START, Minesweeper.letters[y], END) for x in range(0, width): value = table[y][x] if value == "0": s += "%s%s%s" % (NORMAL, value, END) elif value == "1": s += "%s%s%s" % (GREEN_START, value, END) elif value == "2": s += "%s%s%s" % (ORANGE_START, value, END) elif value == "3": s += "%s%s%s" % (RED_START, value, END) elif value == "4" or value == "5" or value == "6" or value == "7" or value == "8": s += "%s%s%s" % (PURPLE_START, value, END) # special elif value == "-": s += "%s%s%s" % (NORMAL, value, END) elif value == Minesweeper.BOMB: if y == exploded_at[0] and x == exploded_at[1]: # Make the bomb at the casualty site explode! s += "%s%s%s" % (RED_START, Minesweeper.EXPLOSION, END) else: # show normal bomb s += "%s%s%s" % (RED_START, value, END) elif value == Minesweeper.FLAG: s += "%s%s%s" % (RED_START, value, END) s += " " * 3 s += "\n" # use tabbing to space them nicely print s.expandtabs(3)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def showState(self):\n for i in self.state[0]:\n for j in self.state[1]:\n print(self.table[i][j], end=\"\")\n print(\"\")", "def displayGame(self):\n # row1 & row2 longer, row3 & row4 shorter, proper indented below\n print 'current table:'\n for key in ['row1','row2']:\n rowLs = self.table[key]\n string = ''\n for ele in rowLs:\n tmpStr = str(ele) + '\\t'\n string += tmpStr\n print string\n for key in ['row3','row4']:\n string = '\\t'\n rowLs = self.table[key]\n for ele in rowLs:\n tmpStr = str(ele) + '\\t'\n string += tmpStr\n print string \n print 'discardList:'\n print self.discardLs[0],'\\t',self.discardLs[1],'\\n',self.discardLs[2],'\\t',self.discardLs[3]", "def print_tables(self):\n print \"------------------\\nTables\\n------------------\"\n cnt = 0\n for x in self.show_tables():\n cnt += 1\n print (\"{0}.) {1}\".format(cnt, x[0]))", "def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))", "def printqtable(self):\n\t\tout = \"PRINTING QTABLE\\n\"\n\t\tfor key in self.qtable:\n\t\t\tout += \"state: \" + str(key) + \"\\n\"\n\t\t\tfor i in range(self.game.pips+1):\n\t\t\t\tout += f\"rew{i}: {self.qtable[key][i]:.3f} \"\n\t\t\tout += \"\\n\"\n\t\treturn out", "def print_table():\n for key in _op_table.keys():\n print(key)\n for sub_key in _op_table[key]:\n print('\\t--' + sub_key)", "def __print_work_table(table):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % ('Act', 'Pred', 'Block', 'Dummy', 'Succ', 'start', 'end')\n for k, col in sorted(table.items()):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % tuple(\n [str(k)] + [list(col[0])] + [str(col[i]) for i in range(1, len(col))])", "def print_table(self, table):\n raise NotImplementedError('print_table method not defined!')", "def print_mistakes_table():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n\n cursor.execute(\"select * from mistakes\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Given Answer','User Given Answer']\n for row in data:\n table.add_row(row)\n conn.close()\n\n return table", "def print_table(self) -> None:\n if (self.probability_links == None):\n print(\"+--------+\")\n print(f\"| P({self.key:1s}) |\")\n print(\"+--------+\")\n print(f\"| {self.probability_values[0]:0.04f} |\")\n print(\"+--------+\")\n else:\n arg_len = 2 + len(' '.join(self.probability_links.keys()))\n param_len = 2 + \\\n max(6, len(\"P(A|)\" + \",\".join(self.probability_links.keys())))\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")\n print(\n f\"| {' '.join(self.probability_links.keys())} | P({self.key}|{','.join(self.probability_links.keys())}) |\")\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")\n for i in range(2**len(self.probability_links.keys())):\n # Gives us a string binary value to make truth table off of\n bool_key = f\"{i:0{len(self.probability_links.keys())}b}\"\n print(\n f\"| {' '.join(['T' if bool_key[j] == '0' else 'F' for j in range(len(self.probability_links.keys()))])} | {f'{self.probability_values[i]:0.04f}':<{param_len-1}s}|\")\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")", "def print_table(hdrs, flag=False, data=[],fmt='psql'):\n\tres = 
cur.fetchall()\n\tif flag:\n\t\tres = data\n\tprint(tabulate(res, headers=hdrs, tablefmt=fmt))", "def print_table(table):\n for i in range(len(table)):\n print \"Row \", i, \"\\t\",\n for j in range(len(table[i])):\n print table[i][j],\n print \"\\n\"", "def print_table(table):\n for row in table:\n print(row)", "def print_table(table):\n for row in table:\n print(row)", "def print_table(table):\n for row in table:\n print(row)", "def print_table(self, table, connection=None):\n\n connection = connection or self.engine.connect()\n result = connection.execute(select([table]))\n print(\n '-----------------------------------------------------------'\n '\\nColumns:\\n\\t{}\\nData:\\n\\t{}\\n'\n '-----------------------------------------------------------'.format(\n table.columns, '\\n\\t'.join(str(row) for row in result)\n )\n )\n\n result.close()", "def print_league_table(league_table):\n output_str = \"Team Name\\t\\t\" + '\\t'.join(constants.POSITIONS + constants.STATS) + '\\n'\n for team in constants.TEAMS:\n output_str += f\"{team}: \"\n for position_or_stat in constants.POSITIONS + constants.STATS:\n output_str += '\\t' + f\"{league_table[team][position_or_stat]}\"\n output_str += \"\\n\"\n print(output_str)", "def print_curation_table(self, **kwargs) -> None:\n s = self.get_curation_table(**kwargs)\n if s:\n print(s) # noqa:T201", "def print_table(table):\n # transpose the table:\n table = map(list, zip(*table))\n # get the column width:\n col_width = [max(len(str(x)) for x in col) for col in zip(*table)]\n # print it to screen:\n print\n for line in table:\n print \"| \" + \" | \".join(\"{:{}}\".format(x, col_width[i]) for i, x in enumerate(line)) + \" |\"\n print", "def print_table(emojis):\n if len(emojis) > 0:\n table = []\n for i in emojis:\n table.append([i.get('id'), i.get('title'), i.get('emoji')])\n print(tabulate(table, headers=[\"ID\", \"Title\", \"Emoji\"]))\n else:\n print(\"¯\\_(ツ)_/¯ Nothing to see here...\")", "def print_table(table, fieldnames):\n print(\"{:<19}\".format(fieldnames[0]), end='')\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(field), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(row[field]), end='')\n print(\"\", end='\\n')", "def print_table(table, fieldnames):\n print(\"{:<19}\".format(fieldnames[0]), end='')\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(field), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(row[field]), end='')\n print(\"\", end='\\n')", "def render_table(game):\n return ''.join(render_player_card_fragment(c, player=p)\n for p, c in game.round.current_trick)", "def print_table(table):\n rest = table[1:]\n fmt = \"%-28s %-9s %-16s %s\"\n for row in rest:\n print(fmt % tuple(row))", "def print_table(response, title):\n print title + ':'\n\n if 'rows' not in response:\n print 'Empty response'\n return\n\n rows = response['rows']\n row_format = '{:<20}' + '{:>20}' * 4\n print row_format.format('Keys', 'Clicks', 'Impressions', 'CTR', 'Position')\n for row in rows:\n keys = ''\n # Keys are returned only if one or more dimensions are requested.\n if 'keys' in row:\n keys = u','.join(row['keys']).encode('utf-8')\n print row_format.format(\n keys, row['clicks'], 
row['impressions'], row['ctr'], row['position'])", "def print_table(table):\r\n print('/-----------------------------------------------------------------------------------\\\\')\r\n for item in table:\r\n\r\n while len(item[1]) <= 22:\r\n item[1] += ' '\r\n\r\n while len(item[2]) <= 27:\r\n item[2] += ' '\r\n\r\n while len(item[0]) <= 15:\r\n item[0] += ' '\r\n\r\n print('| '+item[0]+' | '+item[1]+'| '+item[2]+' |')\r\n\r\n print('\\\\-----------------------------------------------------------------------------------/')", "def show_table(self, keys=None, sort_keys_function=None):\n rows = []\n output_keys = keys or self.keys\n\n for item in self.__get_items(sort_keys_function):\n row = []\n for output_key in output_keys:\n row.append(getattr(item, self.mapping[output_key]))\n rows.append(row)\n print(tabulate(rows, output_keys))", "def print_table(table):\n for row in table:\n # Header column left justified\n print(\"{:<19}\".format(row[0]), end='')\n # Remaining columns right justified\n for col in row[1:]:\n print(\"{:>4}\".format(col), end='')\n print(\"\", end='\\n')", "def print(self):\n for i in range(self.rows):\n print(\"--\" * self.cols + \"-\")\n for j in range(self.cols):\n cell = self.get_game_cell(i, j)\n if cell is None:\n print(f'({i} - {j}): failed')\n return None\n if cell.status == 'EMPTY':\n print(\"| \", end=\"\")\n else:\n print(f\"|{cell.status}\", end=\"\")\n print(\"|\")\n print(\"--\" * self.cols + \"-\")\n print(f\"Completed({self.completed}) - {self.winner}\")", "def print_truth_table(formula):\r\n # Task 2.5\r\n variables = list(formula.variables())\r\n sorted_variables = sorted(variables)\r\n print(\"|\", end=\"\")\r\n for variable in sorted_variables:\r\n print(\" \" + variable + \" |\", end=\"\")\r\n print(\" \" + formula.infix() + \" |\")\r\n print(\"|\", end=\"\")\r\n for variable in sorted_variables:\r\n current_variable_hyphens = \"\"\r\n for letter in range(len(variable)):\r\n current_variable_hyphens += \"-\"\r\n print(\"-\" + current_variable_hyphens + \"-|\", end=\"\")\r\n formula_hyphens = \"\"\r\n for letter in range(len(formula.infix())):\r\n formula_hyphens += \"-\"\r\n print(\"-\" + formula_hyphens + \"-|\")\r\n models = list(all_models(sorted_variables))\r\n values = truth_values(formula, models)\r\n formula_spaces = \"\"\r\n for letter in range(len(formula.infix()) - 1):\r\n formula_spaces += \" \"\r\n for model, value in zip(models, values):\r\n print(\"|\", end=\"\")\r\n for variable in sorted_variables:\r\n variable_spaces = \"\"\r\n for i in range(len(variable)):\r\n variable_spaces += \" \"\r\n if model[variable]:\r\n print(\" T\" + variable_spaces + \"|\", end=\"\")\r\n else:\r\n print(\" F\" + variable_spaces + \"|\", end=\"\")\r\n if value:\r\n print(\" T\" + formula_spaces + \" |\")\r\n else:\r\n print(\" F\" + formula_spaces + \" |\")", "def show_table(table):\n # id: string\n # Unique and random generated (at least 2 special char()expect: ';'),\n # 2 number, 2 lower and 2 upper case letter)\n # title: string\n # manufacturer: string\n # price: number (dollars)\n # in_stock: number\n title_list = [\"ID\", \"Title\", \"Manufacturer\",\n \"Price\", \"Number in stock\"]\n ui.print_table(table, title_list)", "def print_table(table):\n print(\"City \", end='')\n for month in MONTHS:\n print(\"{:>6}\".format(month), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for month in MONTHS:\n print(\"{:>6}\".format(row[month]), 
end='')\n print(\"\", end='\\n')", "def print_table(data):\n for key in sorted(data):\n print \"%s: %s\" % (key.rjust(16), data[key])", "def print_table(ledger):\n\n table = PrettyTable() # defines a PrettyTable object\n\n table.field_names = [\n \"hospital\",\n \"patient\",\n \"status\",\n \"nonce\",\n \"prev_hash\",\n \"a\",\n \"b\",\n \"c\",\n \"current_hash\",\n ] # define field names for table\n\n for block in ledger:\n table.add_row(\n [\n block[\"hospital\"],\n block[\"patient\"],\n block[\"status\"],\n block[\"nonce\"],\n block[\"prev_hash\"],\n block[\"a\"],\n block[\"b\"],\n block[\"c\"],\n block[\"current_hash\"],\n ]\n ) # add data to table\n\n print(\"\\n\\n\" + color.BOLD + \"Printing Your Ledger:\" + color.END)\n print(table) # print prettytable of patient info", "def print_database(self):\n table_names = self.catalog\n for table_name in table_names:\n table = self.parse_table(table_name)\n if not table:\n continue\n print(f'TABLE NAME: {table_name}\\r\\n')\n print(tabulate(table, headers=\"keys\"))\n print('\\r\\n\\r\\n\\r\\n\\r\\n')", "def print_tables(db):\n # connect to the database and create a cursor\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByCountry'\n\n # print the data from StatelessCountByCountry\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByRegion'\n\n # print the data from StatelessCountByRegion", "def printTable(songs, language):\n\n attributes = \"\"\n if language == ENGLISH:\n print \"Song Name\".ljust(55) + \" | URL\".ljust(60) + \" | Status\\t\\t\"\n print \"-\" * 56 + \"+\" + \"-\" * 57 + \"+\" + \"-\" * 20\n rows = \"\"\n for i in range(len(songs[\"song\"])):\n rows = rows + songs[\"song\"][i][\"name\"].ljust(55) + \" | \" \\\n + songs[\"song\"][i][\"url\"].ljust(57) + \" | \" \\\n + songs[\"song\"][i][\"status\"] + \"\\n\"\n print rows \n else:\n print \"歌曲名稱\".ljust(55) + \" | URL\".ljust(60) + \" | 狀態\\t\\t\"\n print \"-\" * 52 + \"+\" + \"-\" * 59 + \"+\" + \"-\" * 20\n rows = \"\"\n for i in range(len(songs[\"song\"])):\n rows = rows + songs[\"song\"][i][\"name\"].ljust(51) + \" | \" \\\n + songs[\"song\"][i][\"url\"].ljust(57) + \" | \" \\\n + songs[\"song\"][i][\"status\"] + \"\\n\"\n print rows", "def print_table(self, items, fields):\r\n formats = []\r\n borders = []\r\n for f in fields:\r\n length = max(len(f),\r\n max([len(self.string(getattr(i, f))) for i in items]))\r\n justify = '>' if isinstance(getattr(\r\n items[0], f), int) or f == 'size' or f == 'reward' else '<'\r\n formats.append('{:' + justify + self.string(length + 2) + '}')\r\n borders.append('-' * length + ' ')\r\n row_format = u''.join(formats)\r\n headers = [f + ' ' for f in fields]\r\n print(row_format.format(*headers))\r\n print(row_format.format(*borders))\r\n for i in items:\r\n i_fields = [self.string(getattr(i, f)) + ' ' for f in fields]\r\n try:\r\n print(row_format.format(*i_fields))\r\n except UnicodeEncodeError:\r\n print(row_format.format(*i_fields).encode('utf-8'))", "def print_truth_table(formula: Formula) -> None:\r\n # Task 2.4\r\n\r\n headers = list()\r\n for var in list(formula.variables()):\r\n headers.append(var)\r\n headers = sorted(headers) # variable names sorted alphabetic\r\n table = list()\r\n result_lst = list()\r\n index = 0\r\n all_models_local = all_models(list(formula.variables()), True)\r\n for result in truth_values(formula, all_models_local):\r\n if result:\r\n result_lst.append(\"T\")\r\n else:\r\n result_lst.append(\"F\")\r\n\r\n temp_lst = list()\r\n for mod_dict in 
all_models_local:\r\n for var in headers:\r\n if mod_dict.get(var):\r\n temp_lst.append(\"T\")\r\n else:\r\n temp_lst.append(\"F\")\r\n\r\n # adding the content of result\r\n temp_lst.append(result_lst[index])\r\n index += 1\r\n table.append(temp_lst.copy())\r\n temp_lst.clear()\r\n\r\n # for var in list(formula.variables()):\r\n # headers.append(var)\r\n headers.append(str(formula)) # the result\r\n # headers = sorted(headers)\r\n from tabulate import tabulate\r\n print(tabulate(table, headers, tablefmt=\"orgtbl\").replace(\"+\", \"|\"))", "def pretty_display(self):\n\t\tpretty_space = PrettyTable()\n\t\tpretty_space.field_names = range(self.space.shape[1])\n\t\tcount = 0\n\t\tpretty_row = []\n\t\tfor cell in self.space.flat:\n\t\t\tcount = count + 1\n\t\t\tpretty_row.append(cell.state)\n\t\t\tif count >= self.space.shape[1]:\n\t\t\t\tpretty_space.add_row(pretty_row)\n\t\t\t\tcount = 0\n\t\t\t\tpretty_row = []\n\t\tprint(pretty_space)", "def print_table2(df, eval_dir):\n\n out_file = os.path.join(eval_dir, 'table2.txt')\n\n\n with open(out_file, \"w\") as text_file:\n\n for idx, struc_name in enumerate(['LV', 'RV', 'Myo']):\n # new line\n header_string = ' & '\n line_string = '({}) '.format(struc_name)\n\n for p_idx, phase in enumerate(['ED', 'ES']):\n for measure in ['dice', 'assd', 'hd']:\n\n header_string += ' & {} ({}) '.format(phase, measure)\n\n dat = df.loc[(df['phase'] == phase) & (df['struc'] == struc_name)]\n\n if measure == 'dice':\n\n line_string += ' & {:.3f}\\,({:.3f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n else:\n line_string += ' & {:.2f}\\,({:.2f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n\n if p_idx == 0:\n header_string += ' & '\n line_string += ' & '\n\n header_string += ' \\\\\\\\ \\n'\n line_string += ' \\\\\\\\ \\n'\n\n if idx == 0:\n text_file.write(header_string)\n\n text_file.write(line_string)\n\n return 0", "def print_tables(hash_table, f_output, l_samples):\n\n l_fields = ['chr', 'pos', 'ref', 'alt', 'QUAL', 'FILTER',\n 'Func.refGene', 'Gene.refGene', 'GeneDetail.refGene', 'ExonicFunc.refGene', 'AAChange.refGene',\n 'cytoBand', 'ExAC_ALL', 'ExAC_AFR', 'ExAC_AMR', 'ExAC_EAS', 'ExAC_FIN', 'ExAC_NFE', 'ExAC_OTH',\n 'ExAC_SAS',\n 'avsnp147', 'SIFT_score', 'SIFT_pred', 'Polyphen2_HDIV_score', 'Polyphen2_HDIV_pred',\n 'Polyphen2_HVAR_score',\n 'Polyphen2_HVAR_pred', 'LRT_score', 'LRT_pred', 'MutationTaster_score', 'MutationTaster_pred',\n 'MutationAssessor_score', 'MutationAssessor_pred', 'FATHMM_score', 'FATHMM_pred', 'PROVEAN_score',\n 'PROVEAN_pred', 'VEST3_score', 'CADD_raw', 'CADD_phred', 'DANN_score', 'fathmm-MKL_coding_score',\n 'fathmm-MKL_coding_pred', 'MetaSVM_score', 'MetaSVM_pred', 'MetaLR_score', 'MetaLR_pred',\n 'integrated_fitCons_score', 'integrated_confidence_value', 'GERP++_RS', 'phyloP7way_vertebrate',\n 'phyloP20way_mammalian', 'phastCons7way_vertebrate', 'phastCons20way_mammalian', 'SiPhy_29way_logOdds']\n l_fields = l_fields + l_samples\n \n l_chr = set([item[0] for item in hash_table.keys()])\n\n fo = open(f_output, 'w')\n fo.write(','.join(l_fields) + '\\n')\n for key in sorted(hash_table.keys(), key=itemgetter(1)):\n fo.write(','.join(map(lambda field: hash_table[key].get(field, '.'), l_fields)) + '\\n')\n fo.close()", "def _print_table(stats):\n max_key_len = max([len(key) for key in stats])\n width_right = 15\n width_left = max(width_right, max_key_len)\n divider = '+-' + '-' * width_left + '-+-' + '-' * width_right + '-+'\n\n def get_format_char(value):\n if isinstance(value, int):\n return 'd'\n elif 
isinstance(value, float):\n return '.4f'\n else:\n return 's'\n\n print(divider)\n for name, value in stats.items():\n left_format = f':>{width_left}s'\n right_format = f':<{width_right}{get_format_char(value)}'\n line_format = f'| {{{left_format}}} | {{{right_format}}} |'\n line = line_format.format(name, value)\n print(line)\n print(divider)", "def start_table(self):\n self.result = \"<table>\\n\"", "def print_output_tables(cls,\n wfns=None, file=None,\n print_intensities=True,\n print_energies=True,\n print_energy_corrections=True,\n print_transition_moments=True,\n operators=None,\n logger=None, sep_char=\"=\", sep_len=100):\n\n if logger is None:\n logger = wfns.logger\n if logger is not None:\n def print_block(label, *args, **kwargs):\n with logger.block(tag=label):\n logger.log_print(\" \".join(\"{}\".format(x) for x in args), **kwargs)\n else:\n if file is None:\n file = sys.stdout\n\n def print_label(label, file=file, **opts):\n lablen = len(label) + 2\n split_l = int(np.floor((sep_len - lablen) / 2))\n split_r = int(np.ceil((sep_len - lablen) / 2))\n print(sep_char * split_l, label, sep_char * split_r, **opts, file=file)\n\n def print_footer(label=None, file=file, **opts):\n print(sep_char * sep_len, **opts, file=file)\n\n def print_block(label, *args, file=file, **kwargs):\n print_label(label, file=file, **kwargs)\n print(*args, file=file, **kwargs)\n print_footer(file=file, **kwargs)\n\n if print_energy_corrections:\n print_block(\"Energy Corrections\", wfns.format_energy_corrections_table())\n if print_energies:\n if wfns.degenerate_transformation is not None:\n print_block(\"Deperturbed Energies\",\n wfns.format_deperturbed_energies_table()\n )\n print_block(\n \"Degenerate Energies\",\n wfns.format_energies_table()\n )\n else:\n print_block(\"States Energies\",\n wfns.format_energies_table()\n )\n\n if print_intensities:\n ints = wfns.intensities # to make sure they're computed before printing starts\n if print_transition_moments:\n if wfns.degenerate_transformation is not None:\n for a, m in zip([\"X\", \"Y\", \"Z\"], wfns.format_deperturbed_dipole_contribs_tables()):\n print_block(\"{} Deperturbed Dipole Contributions\".format(a), m)\n\n print_block(\"Deperturbed IR Data\",\n wfns.format_deperturbed_intensities_table()\n )\n\n for a, m in zip([\"X\", \"Y\", \"Z\"], wfns.format_dipole_contribs_tables()):\n print_block(\"{} Dipole Contributions\".format(a), m)\n print_block(\"IR Data\", wfns.format_intensities_table())\n\n if operators is not None:\n print_block(\"Operator Data\", wfns.format_operator_table(operators))", "def print_state(self):\n print(\"n\\tg\\to\\ta\\tc\\ts\\ttau\\td\\tN\")\n for p in self.persons:\n p.print_state()\n print(\"type\\tpersons\")\n for ps in self.partnerships:\n ps.print_state()", "def print_tables(self):\n\n conn = self.engine.connect()\n self.print_table(self.nodes, conn)\n self.print_table(self.paths, conn)\n self.view_tree(connection=conn)", "def print_truth_table(formula: Formula) -> None:\n # Task 2.4\n variables = list(sorted(formula.variables()))\n assignment_dict = all_models(list(variables))\n assignment_results = list(truth_values(formula, assignment_dict))\n arr = []\n for i, assignment in enumerate(assignment_dict):\n vals = list(assignment.values())\n vals.append(assignment_results[i])\n vals = ['T' if i == True else 'F' for i in vals]\n arr.append(vals)\n\n variables.append(str(formula))\n table_printer(variables, arr)", "def print_report(self):\n print '=' * 20 + ' %s ' % self.label + '=' * 20\n print 
'%-20s%5s\\t%4s\\t%4s\\t%4s\\t%4s' % (\n 'Hand' + '=' * 16, '#', 'Frac', 'W', 'Tie', 'L')\n for hand, result_dict in self.counts.iteritems():\n total_for_hand = sum(result_dict.itervalues())\n if total_for_hand == 0:\n win_frac = 0.0\n tie_frac = 0.0\n loss_frac = 0.0\n else:\n win_frac = float(result_dict[WIN_RESULT])/total_for_hand\n tie_frac = float(result_dict[TIE_RESULT])/total_for_hand\n loss_frac = float(\n result_dict[LOSS_RESULT])/total_for_hand\n print '%-20s%5d\\t%0.3f\\t%0.3f\\t%0.3f\\t%0.3f' % (\n hand, total_for_hand, float(total_for_hand)/self.total_items,\n win_frac, tie_frac, loss_frac)", "def print_individuals(self):\n pt = PrettyTable()\n pt.field_names = ['ID', 'Name', 'Gender', 'Birthday', 'Age', 'Alive', 'Death', 'Child', 'Spouse']\n for i in self.individuals.values():\n pt.add_row(i.get_values())\n print(pt)", "def print_data(self):\n total_score = 0.0\n\n title_game = 'Game'\n title_word = 'Word'\n title_word_status = 'Word Status'\n title_bad_guesses = 'Bad Guesses'\n title_missed_letters = 'Missed Letters'\n title_total_score = 'Total score'\n\n if not record_word:\n print(\"No words played.\")\n else:\n print('%-5s %-10s %-12s %-5s %-5s %s' %(title_game,title_word, title_word_status, title_bad_guesses, title_missed_letters,title_total_score))\n print('---- ---- ------------ ----------- -------------- -----------')\n for x in range(len(record_word)):\n print('%-5s %-10s %-13s %-11s %-13s %.2f'%(record_game[x],record_word[x],record_word_status[x],record_bad_guesses[x],record_missed_letters[x],record_total_score[x]))\n\n for x in range(len(record_total_score)):\n total_score = total_score + record_total_score[x]\n\n print('\\nFinal Score: %.2f' %total_score)", "def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list", "def print_movie_table(self):\n self = self\n headers = [\"Votes\", \"Rank\", \"Year\", \"Title\"]\n self.handler.header(headers)\n\n for movie in self.movie_list:\n self.handler.row([str(movie.get_votes()), str(movie.get_rank()),\n str(movie.get_year()), str(movie.get_title())])\n\n self.handler.footer()", "def print(self):\n print(\" a b c d e f g h \")\n print(\" ┼───┼───┼───┼───┼───┼───┼───┼───┼\")\n for row in range(8, 0, -1):\n pieces = \" │ \".join(self.state[row - 1])\n print(f\"{row} │ {pieces} │ {row}\")\n print(\" ┼───┼───┼───┼───┼───┼───┼───┼───┼\")\n print(\" a b c d e f g h \")", "def visualise_q_table(q_table):\n # extract best acts\n act_table = np.zeros((4, 4))\n str_table = []\n for row in range(4):\n str_table.append(\"\")\n for col in range(4):\n pos = row * 4 + col\n max_q = None\n max_a = None\n for a in range(4):\n q = q_table[(pos, a)]\n if max_q is None or q > max_q:\n max_q = q\n max_a = a\n act_table[row, col] = max_a\n str_table[row] += act_to_str(max_a)\n\n # print best actions in human_readable format\n print(\"\\nAction selection table:\")\n for row_str in str_table:\n print(row_str)\n print()", "def prompt_table(prompt, table):\n while True:\n print(prompt)\n for i in range(0, len(table)):\n row_format = \"{:>15}\" * (len(table[i]) + 1)\n print(f\"{i})\\t\" + row_format.format(\"\", *table[i]))\n response = prompt_base(\"\")\n try:\n response = int(response)\n if 0 <= response < len(table):\n return table[response]\n except:\n pass", "def display_board(self):\n print(f\"{BREAK_STRING}\\n\")\n print(f\"STOCK \\t WASTE \\t\\t\\t\\tFOUNDATION\\n\")\n print(f\"{self.stock}\\t{self.waste}\\t\\t\\t\\t{self.foundations['clubs']}\\t{self.foundations['diamonds']}\"\n 
f\"\\t{self.foundations['hearts']}\\t{self.foundations['spades']}\\n\")\n print(f\"\\nTABLEAU\\n\")\n for num in range(1, 8):\n print(f\"{num} {self.tableaus[num-1]}\")\n print(f\"{BREAK_STRING}\\n\")", "def print_table(headers, rows):\n try:\n if headers:\n print('\\n')\n print(tabulate.tabulate(\n rows, headers=headers,\n tablefmt=\"plain\", numalign=\"left\"\n ))\n print('\\n')\n except Exception as e:\n print(e.message)", "def pprint_table(out, table):\n\n\tcol_paddings = []\n\n\tfor i in range(len(table[0])):\n\t\tcol_paddings.append(get_max_width(table, i))\n\n\tfor row in table:\n\t\t# left col\n\t\tout.write(str(row[0]).ljust(col_paddings[0] + 1))\n\t\t\n\t\t# rest of the cols\n\t\tfor i in range(1, len(row)):\n\t\t\tout.write(str(row[i]).rjust(col_paddings[i] + 2))\n\t\t\n\t\tout.write('\\n')", "def display(self):\n print(\n f'\\t\\t {self.name.upper()} {self.potency[0]}{self.potency[1]}\\t\\t'\n f' {self.dose_qty[0]} {self.dose_qty[1]} {self.dose[0]} {self.dose[1].upper()}')", "def print_scoreboard(self):\n output = ''\n # parallel dictionaries with innings and scores\n innings = []\n away = []\n home = []\n for x in self:\n innings.append(x['inning'])\n away.append(x['away'])\n home.append(x['home'])\n # go through all the information and make a nice output\n # that looks like a scoreboard\n output += 'Inning\\t'\n for x in innings:\n output += str(x) + ' '\n output += '\\n'\n for x in innings:\n output += '---'\n output += '\\nAway\\t' + self.__enumerate_scoreboard(away)\n output += '\\nHome\\t' + self.__enumerate_scoreboard(home)\n return output", "def dump(self):\r\n for (name, value) in self.__table__.items():\r\n print (name)\r\n print (value)", "def print_table(table_2D, title_list):\n \n max_length = [] # max length of item for each column\n\n # BELOW VAR NEEDS TO BE FIXED, GOT RID OFF\n # without this correction table horizontal lines displays unevenly\n length_correction = 2 \n\n # count max length of all elements in a table, so we can print all details in neat columns\n for row in table_2D:\n column = 0\n\n for item in row:\n item = str(item)\n\n try:\n if len(item) > max_length[column]:\n max_length[column] = len(item)\n column += 1\n # expand table if needed\n except IndexError:\n max_length.append(0)\n if len(item) > max_length[column]:\n max_length[column] = len(item)\n column += 1\n\n title_index = \"No\"\n\n # print titles, while keeping columns straight\n titles = side_sign + \" \" + title_index + separator_sign\n for i in range(len(title_list)):\n # count length of all titles, to check if they are longer than entries\n if len(title_list[i]) > max_length[i]:\n max_length[i] = len(title_list[i])\n\n titles += title_list[i] + fill(str(title_list[i]), max_length[i]) + separator_sign\n\n print(\"\\n\\t/\" + fill(\"\", len(titles.strip())-length_correction, sourrounding_sign) + \"\\\\\") # print top line\n print(\"\\t\" + titles)\n print(\"\\t\" + side_sign + fill(\"\", len(titles.strip())-length_correction, sourrounding_sign) + side_sign) # print line below titles\n\n table_content = \"\"\n # print all game details, while keeping columns straight\n for row in range(len(table_2D)):\n table_content += \"\\t\" + side_sign + \" \" + str(row+1) + fill(str(row+1), max(len(str(row+1)), len(title_index))) + separator_sign\n for item in range(len(table_2D[row])):\n table_content += str(table_2D[row][item]) + fill(str(table_2D[row][item]), max_length[item]) + separator_sign\n table_content += \"\\n\"\n\n print(table_content, end=\"\")\n print(\"\\t\\\\\" + fill(\"\", 
len(titles.strip())-length_correction, sourrounding_sign) + \"/\")", "def print_table1(df, eval_dir):\n\n out_file = os.path.join(eval_dir, 'table1.txt')\n\n header_string = ' & '\n line_string = 'METHOD '\n\n\n for s_idx, struc_name in enumerate(['LV', 'RV', 'Myo']):\n for measure in ['dice', 'assd']:\n\n header_string += ' & {} ({}) '.format(measure, struc_name)\n\n dat = df.loc[df['struc'] == struc_name]\n\n if measure == 'dice':\n line_string += ' & {:.3f}\\,({:.3f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n else:\n line_string += ' & {:.2f}\\,({:.2f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n\n if s_idx < 2:\n header_string += ' & '\n line_string += ' & '\n\n header_string += ' \\\\\\\\ \\n'\n line_string += ' \\\\\\\\ \\n'\n\n with open(out_file, \"w\") as text_file:\n text_file.write(header_string)\n text_file.write(line_string)\n\n return 0", "def print_all(self):\n print(\n \"\"\"\\nContents of hash table, with blank lines separating distinct\n linked lists:\"\"\".replace(' ', ''))\n\n for linked_list in self.main_array:\n print(linked_list)\n print('')", "def print_puzzle(state):\r\n \r\n print('-----')\r\n for i in range(4):\r\n print('|', end=\"\")\r\n for j in range(3):\r\n if state[i][j] == 0:\r\n print(\" |\", end=\"\")\r\n else:\r\n print(\"\", state[i][j], \"|\", end=\"\")\r\n if i == 0:\r\n break\r\n print('\\n-------------')", "def show_answer_board(self, coords):\n Minesweeper.print_table(self.final_table, coords)", "def printTableOnlyX(songs, language):\n\n attributes = \"\"\n if language == ENGLISH:\n print \"Song Name\".ljust(50) + \" | URL\".ljust(60) + \" | Status\\t\\t\"\n print \"-\" * 51 + \"+\" + \"-\" * 57 + \"+\" + \"-\" * 20\n rows = \"\"\n for i in range(len(songs[\"song\"])):\n if songs[\"song\"][i][\"status\"] == 'X':\n rows = rows + songs[\"song\"][i][\"name\"].ljust(50) + \" | \" \\\n + songs[\"song\"][i][\"url\"].ljust(57) + \" | \" \\\n + songs[\"song\"][i][\"status\"] + \"\\n\"\n print rows \n else:\n print \"歌曲名稱\".ljust(50) + \" | URL\".ljust(60) + \" | 狀態\\t\\t\"\n print \"-\" * 47 + \"+\" + \"-\" * 59 + \"+\" + \"-\" * 20\n rows = \"\"\n for i in range(len(songs[\"song\"])):\n if songs[\"song\"][i][\"status\"] == 'X':\n rows = rows + songs[\"song\"][i][\"name\"].ljust(46) + \" | \" \\\n + songs[\"song\"][i][\"url\"].ljust(57) + \" | \" \\\n + songs[\"song\"][i][\"status\"] + \"\\n\"\n print rows", "def Table(self, line):\n if line is None:\n # TODO(user): Use resource_printer.TablePrinter() when it lands.\n if self._rows:\n cols = len(self._rows[0])\n width = [0 for _ in range(cols)]\n for row in self._rows:\n for i in range(cols - 1):\n w = len(row[i])\n if width[i] <= w:\n width[i] = w + 1\n for row in self._rows:\n self._out.write(' ' * (self._indent[self._level] + 2))\n for i in range(cols - 1):\n self._out.write(row[i].ljust(width[i]))\n self._out.write(row[-1] + '\\n')\n self._rows = []\n self._table = False\n self._out.write('\\n')\n elif not self._table:\n self._table = True\n self.Line()\n else:\n self._rows.append(line.split(','))", "def printTable (table, word1, word2, speed, fill, turn, LCS=''):\n howFast (speed)\n \n print (50*'\\n') # Only one board on the console at a time\n \n if (fill == 'fill'): # What is happening\n print('Filling out table:')\n elif (fill == 'back'): \n print('Backtracking through table:')\n elif(fill == 'final'): \n print('Final Table:')\n \n whatsHappening = [ # What step is happening\n 'Base Case.', \n 'Letters are the same! Increasing index', \n 'Letters are different. 
Taking max of left and above.', \n \"Word 2 letter isn't in LCS. Moving up.\", \n \"Word 1 letter isn't in LCS. Moving to left.\", \n 'Letters are the same! Adding to LCS.',\n 'Table Complete!'\n ]\n print(whatsHappening[turn])\n \n print ()\n print(' ', end = '') # Formatting\n \n for z in range(len(word1)): # Printing word 1 at top\n print (word1[z], end='')\n print(' ', end = '')\n print('\\n') \n \n for i in range(len(word2) + 1): \n if (i!=0): \n print(word2[i-1], end = ' ') # Prining word 2 vertical\n else: # Formatting Base Case line\n print(' ', end = '') \n \n for j in range(len(word1) + 1):\n print (table[i][j], end=' ') # Printing table\n \n print()\n print ()\n print('Longest Common Subsequence: ' + str(LCS)) # Live Updating the LCS\n \n if (speed == 'manual'): # To make clear how to proceed\n print()\n print(\"Press 'Enter' to continue\")", "def generate_table(self):\n states = self.get_canonical_collection()\n # self.print_canonical_collection(states)\n table = [{} for _ in range(len(states))]\n\n for index in range(len(states)):\n state = states[index]\n first_rule_cnt = 0\n second_rule_cnt = 0\n third_rule_cnt = 0\n beta = []\n for prod in state:\n dot_index = prod[1].index('.')\n alpha = prod[1][:dot_index]\n beta = prod[1][dot_index + 1:]\n if len(beta) != 0:\n first_rule_cnt += 1\n else:\n if prod[0] != 'S1':\n second_rule_cnt += 1\n production_index = self.grammar.P.index((prod[0], alpha))\n elif alpha == [self.grammar.S[0]]:\n third_rule_cnt += 1\n if first_rule_cnt == len(state):\n table[index]['action'] = 'shift'\n\n elif second_rule_cnt == len(state):\n table[index]['action'] = 'reduce ' + str(production_index)\n\n elif third_rule_cnt == len(state):\n table[index]['action'] = 'acc'\n else:\n conflict_msg = 'Conflict! State I' + str(index) + ': ' + str(state) + '\\nSymbol: ' + beta[0]\n raise (Exception(conflict_msg))\n for symbol in self.grammar.N + self.grammar.E: # the goto part of the table\n next_state = self.go_to(state, symbol)\n if next_state in states:\n table[index][symbol] = states.index(next_state)\n # print(\"table\", table)\n return table", "def print_sansanito():\n\tssn = \"SELECT * FROM sansanito\"\n\tcur.execute(ssn)\n\tprint_table(hdrs_sansanito)", "def print_results(identity_list):\n inversed_indexes = {v: k for k, v in INDEXES.items()}\n highest_val = identity_list[0][0]\n highest_hand = \"A A\"\n lowest_val = highest_val\n lowest_hand = \"A A\"\n running_total = 0.0\n\n print(f\"This table contains win percentages from comparing {HANDS} hands\")\n print(f\"against each other in {SIMULATIONS} simulations\\n\")\n print(\" A K Q J T 9 8 7 6 5 4 3 2\\n\")\n for row in range(len(INDEXES)):\n print(f\"{inversed_indexes[row]} \", end=\"\")\n for col in range(len(INDEXES)):\n print(f\"{format(identity_list[row][col], '.2f')}\", end=\" \") # To two decimal places\n\n # Update highest/lowest values\n if identity_list[row][col] > highest_val:\n highest_val = identity_list[row][col]\n highest_hand = f\"{inversed_indexes[row]} {inversed_indexes[col]}\"\n if row != col:\n suited = True if col > row else False\n highest_hand += ' suited' if suited else ' off'\n\n if identity_list[row][col] < lowest_val:\n lowest_val = identity_list[row][col]\n lowest_hand = f\"{inversed_indexes[row]} {inversed_indexes[col]}\"\n if row != col:\n suited = True if col > row else False\n lowest_hand += ' suited' if suited else ' off'\n\n # Update running total\n running_total += identity_list[row][col]\n\n print(\"\\n\")\n\n print(f\"The hand with the highest win percentage was 
{highest_hand} \", end=\"\")\n print(f\"with {format(highest_val, '.2f')}% of hands won\")\n print(f\"The hand with the lowest win percentage was {lowest_hand} \", end=\"\")\n print(f\"with {format(lowest_val, '.2f')}% of hands won\")\n print(f\"The average win percentage overall was \", end=\"\")\n print(f\"{format(running_total / len(INDEXES) ** 2, '.2f')}%\")", "def print_table(listx):\r\n\tfor lists in listx:\r\n\t\tfor i in lists:\r\n\t\t\tprint str(i) , '\\t',\r\n\t\tprint()", "def output_table(results, output, keys=None, sort_key=None):\n\n if output not in constants.TABLE_OUTPUT_FORMAT:\n raise ValueError(\"Output format must be{}, \"\n \"got {}\".format(constants.TABLE_OUTPUT_FORMAT,\n output))\n if output == 'print':\n if len(results) == 0:\n print 'No output!'\n return\n\n headers = [keys[k] for k in keys.keys()] if keys else results[0].keys()\n table = PrettyTable(headers)\n for line in results:\n table.add_row([line[k] if k in line else '' for k in (keys.keys() if keys else headers)])\n\n if sort_key:\n table.sortby = keys[sort_key] if keys else sort_key\n\n print table\n\n if output == 'csv':\n csvwriter = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)\n keys = results[0].keys()\n csvwriter.writerow(keys)\n for row in results:\n csvwriter.writerow([row[k] for k in keys])\n\n if output == 'json':\n print json.dumps(results)", "def __str__(self):\n dictt = self.getFullDict()\n return \"SymbolTable(\\n{}\\n)\".format(pprint.pformat(dictt))", "def print_table(source, count=False):\n table_value = []\n table_header = []\n for source_key, source_value in source.items():\n for item in source_value:\n table_value.append([v for v in item.values()])\n table_header.append([k for k in item.keys()])\n if not count:\n print(tabulate(table_value,\n headers=table_header[0],\n tablefmt='orgtbl'))\n else:\n print(tabulate([[len(source_value)]],\n headers=[source_key],\n tablefmt='orgtbl'))", "def updateTableOutcomes(self):\n if not self.data or not self.predictors or not self.outvar:\n return\n\n classification = self.outvar.varType == orange.VarTypes.Discrete\n\n # sindx is the column where these start\n sindx = len(self.data.domain.variables)\n col = sindx\n showprob = self.showProb and len(self.selectedClasses)\n fmt = \"%%1.%df\" % self.precision\n if self.showClass or (classification and showprob):\n for (cid, c) in enumerate(self.predictors.values()):\n if classification:\n for (i, d) in enumerate(self.data):\n (cl, p) = c(d, orange.GetBoth)\n\n self.classifications[i].append(cl)\n s = \"\"\n if showprob:\n s = \" : \".join([fmt % p[k] for k in self.selectedClasses])\n if self.showClass: s += \" -> \"\n if self.showClass: s += \"%s\" % str(cl)\n self.table.setItem(self.rindx[i], col, QTableWidgetItem(s))\n print s, self.rindx[i], col\n else:\n # regression\n for (i, d) in enumerate(self.data):\n cl = c(d)\n self.classifications[i].append(cl)\n self.table.setItem(self.rindx[i], col, QTableWidgetItem(str(cl)))\n col += 1\n else:\n for i in range(len(self.data)):\n for c in range(len(self.predictors)):\n self.table.setItem(self.rindx[i], col+c, QTableWidgetItem(''))\n col += len(self.predictors)\n\n for i in range(sindx, col):\n if self.showClass or (classification and self.showProb):\n self.table.showColumn(i)\n## self.table.adjustColumn(i)\n else:\n self.table.hideColumn(i)", "def tabout(things, file=sys.stdout):\n print(\"\\t\".join([str(x) for x in things]), file=file)\n file.flush()", "def print_tables(self, amino_df, zinc_df, food_df):\n\n # Output amino acid nutrients dataframe to a 
csv file\n amino_df.to_csv('amino_acid_food.csv', sep=',')\n print \"A table of amino acids found in different food is saved as amino_acid_food.csv \"\n # Output zinc content of food groups dataframe to a csv file\n zinc_df.to_csv('zinc_FoodGroup.csv', sep=',')\n print \"The zinc value in food belongs to different food groups is saved as zinc_FoodGroup.csv \"\n # Output food group dataframe to a csv file\n food_df.to_csv('FoodGroup.csv', sep=',')\n print \"A table of food names categorized to different food groups is saved as FoodGroup.csv \"", "def print_tables(self,\n wfns=None, file=None,\n print_intensities=True,\n print_energy_corrections=True,\n print_transition_moments=True,\n operators=None,\n logger=None,\n sep_char=\"=\", sep_len=100):\n\n if wfns is None:\n wfns = self.get_wavefunctions()\n\n if isinstance(logger, Logger):\n logger = logger\n elif logger is True or logger is None:\n logger = Logger()\n else:\n logger = Logger(logger)\n\n\n self.print_output_tables(wfns=wfns, file=file,\n print_intensities=print_intensities,\n print_energy_corrections=print_energy_corrections,\n print_transition_moments=print_transition_moments,\n operators=operators, logger=logger,\n sep_char=sep_char, sep_len=sep_len)\n\n return wfns", "def print_table(table, db_file):\n \n try:\n conn, c = connect_to_db(db_file)\n rows = c.execute('SELECT * FROM {t}'.format(t=safe(table))).fetchall()\n cols = c.execute(\"PRAGMA table_info({t})\".format(t=safe(table))).fetchall()\n conn.close()\n pstring = '\\nTABLE ' + table + '\\n'\n r = 1\n for row in rows:\n pstring += '\\nROW ' + str(r)\n for i in range(len(cols)):\n pstring += '\\n ' + cols[i][1].ljust(16) + ' '\n if isinstance(row[i], int):\n pstring += str(row[i])\n elif isinstance(row[i], bytes):\n pstring += row[i].decode('utf-8')\n else:\n pstring += row[i]\n pstring += '\\n'\n r += 1\n return pstring\n except Exception as e:\n print(\"Error when trying to print table\", table)\n print(e)", "def show_html_tables(html_tables):\n\n for (it,t) in enumerate(html_tables):\n print(f\"Table {it}\")\n for (ir,r) in enumerate(t):\n print(f\" Row {ir}\")\n for (ic,c) in enumerate(r):\n print(f\" Col {ic}: {c}\")", "def print_hand(self):\n if self.cheating:\n print(\"You're cheating!\")\n print(\"until you reroll it!\")\n print(\"\"\"\nYou rolled:\na = [ {} ]\nb = [ {} ]\n\nYou are in Stage {}\n \"\"\".format(self.die_a, self.die_b, self.stage))", "def print_game_state(board):\r\n print(board)\r\n illegal_moves = [(0, 0), (2, 0), (0, 4), (2, 4)]\r\n for i in range(board.shape[0]):\r\n buffer = ''\r\n for j in range(board.shape[1]):\r\n if board[i][j] == 1:\r\n buffer += 'X\\t'\r\n elif board[i][j] == 2:\r\n buffer += '0\\t'\r\n elif (i, j) in illegal_moves:\r\n buffer += ' \\t'\r\n else:\r\n buffer += '-\\t'\r\n print (buffer)", "def generate_table(self, outtablename,\n cols=['A', 'B', 'AB'],\n generateTable=True):\n if generateTable:\n new_indices = ['time (s)', 'mean counts']\n for idx in self.data[cols].describe().index[2:]:\n new_indices.append(idx)\n outTable = self.data[cols].describe()\\\n .set_index(pd.Index(new_indices))\n outTable.to_latex(\n self.tables_dir + outtablename + \".tex\", float_format=\"%d\")\n print(\"Outtable: \", outTable)", "def print_table(seqids, data, outputfile, separator='\\t'):\n\n tags = data.keys()\n with open(outputfile, 'w') as out:\n out.write(separator.join([\"#Sequence ID\"] + list(tags)) + \"\\n\")\n for s in seqids:\n out.write(s)\n for t in tags:\n out.write(\"{}{}\".format(separator, data[t].get(s, \"\")))\n 
out.write(\"\\n\")", "def display_stats_table(after_game_no_dicts, stat_names):\n latest_dict = after_game_no_dicts[len(after_game_no_dicts)]\n first_stat_name = stat_names[0]\n sorted_pairs = get_stats_rankings(latest_dict, first_stat_name)\n\n def get_rows(sorted_pairs):\n latest_stat = None\n latest_position = None\n for position, (team, stats) in enumerate(sorted_pairs, start=1):\n this_stat = getattr(stats, first_stat_name)\n if latest_stat == this_stat:\n position = latest_position\n else:\n latest_position = position\n stat_cells = [getattr(stats, name) for name in stat_names]\n row = [position, team] + stat_cells\n yield_row\n rows = get_rows(sorted_pairs)\n display_table(['Position', 'Team'] + stat_names, rows)", "def print_table(rows, labels=None):\n if labels is None:\n labels = ROW_LABELS\n\n output_table = prettytable.PrettyTable()\n output_table.field_names = labels\n output_table.align = 'l'\n output_table.vrules = prettytable.prettytable.ALL\n output_table.hrules = prettytable.prettytable.HEADER\n\n for row in rows:\n row = [x.strip() for x in row]\n output_table.add_row(row)\n\n print output_table\n print ''", "def print_statements_when_needed(self, statements, message):\n if statements != []:\n print message, self.table.__tablename__\n print '_' * len(message) + '_' * len(self.table.__tablename__), '\\n'\n for s in statements:\n print s", "def print_dice(self):\n\n stage_to_print = 3 if self.current_stage == 4 else self.current_stage\n print(\"You rolled:\\n a = [ {} ]\\n b = [ {} ]\\n\\nYou are in Stage {}\"\n .format(self.die_a, self.die_b, stage_to_print))", "def print_table(rows, header=['Operation', 'OPS']):\n if len(rows) == 0:\n return\n col_max = [max([len(str(val[i])) for val in rows]) + 3 for i in range(len(rows[0]))]\n row_format = ''.join([\"{:<\" + str(length) + \"}\" for length in col_max])\n\n if len(header) > 0:\n print(row_format.format(*header))\n print(row_format.format(*['-' * (val - 2) for val in col_max]))\n\n for row in rows:\n print(row_format.format(*row))\n print(row_format.format(*['-' * (val - 3) for val in col_max]))", "def print_poyo():\n\tpoyo = \"SELECT * FROM poyo\"\n\tcur.execute(poyo)\n\tprint_table(hdrs_poyo)", "def _get_table_info(self):\n highestbet = self.highestBetNotFold(),\n bigb =self.bigBlind() if self._game_state == GAME_STATE_PRE_FLOP and not self.inSmallBlindPosition() else 0\n return [\"blinds: small:%r big:%r\" % (self.small_blind, self.big_blind),\n \"buy_ins: min:%r max:%r\" % (self.min_buy_in, self.max_buy_in),\n \"bs: %r\" % self.betting_structure,\n \"highestbet = %r\" % highestbet,\n \"bigb = %r\" % bigb,]", "def pretty_print(conn, cur): # an out dated function. 
if use update with fortnite_database object\n\n cur.execute(\"\"\"SELECT * FROM CurrentStats \n ORDER BY alive desc\n \"\"\")\n data = cur.fetchall()\n # returns x in the x[0] // header row in the data.descriptions\n # iterates through all the items in the data description\n print(\"\\n{:20s}{:20s}{:20s}{:20s}{:20s}{:20s}{:20s}{:20s}{:20s}{:20s}\".format(\n \"user_id\", \"username\", \"current_kills\", \"current_weapon\", \"total_wins\", \"total_matches\", \"win_percent\",\n \"total_kills\", \"kd\", \"alive\"))\n\n for d in data:\n print(\n \"{:<20d}{:20s}{:<20d}{:20s}{:<20d}{:<20d}{:<20.2f}{:<20d}{:<20.2f}{:<20d}\".format(d[0], d[1], d[2], d[3], d[4],\n d[5], d[6], d[7], d[8], d[9]))", "def printing_p_matrix(new_all_results):\n\tprint(\"________________________________PROBABILITY MATRIX__________________________________ \")\n\tfor i in range(len(new_all_results)):\n\t\tprint(\"Row Number: \", i+1)\n\t\tprint(\"Vector: \", all_states_explored[i])\n\t\tprint(\"Number of columns: \", len(new_all_results[i]))\n\t\tprint(\"Result: \", new_all_results[i])\n\t\tprint(\"-------------------------------------------------------------------------------------\")\n\tprint(\"____________________________________________________________________________________\")", "def print_status(self):\r\n\t\tif VERBOSE:\r\n\r\n\t\t\tprint( 'Player : ')\r\n\t\t\tfor h in self.hands:\r\n\t\t\t\tprint('\\t' + str(h))\r\n\t\t\tprint( 'Dealer:\\n\\t' + str(self.dealer))\r\n\t\t\tprint( '-----------------------')", "def print_boolean_matrix(true, pred):\n classes = list(true)\n classes.extend(pred)\n classes = list(set(classes))\n matrix_true = dict()\n matrix_false = dict()\n for c in classes:\n matrix_true[c] = 0\n matrix_false[c] = 0\n\n precision, recall, _, _ = score(true, pred, labels=classes)\n\n for i in range(len(true)):\n label_true = true[i]\n label_pred = pred[i]\n if label_true == label_pred:\n matrix_true[label_true] += 1\n else:\n matrix_false[label_true] += 1\n\n print('\\\\begin{table}[h]')\n print('\\\\centering')\n print('\\\\caption{Boolean Matrix}')\n print('\\\\label{boolean_matrix}')\n print('\\\\begin{tabular}{|r|r|r|r|r|}')\n print(' \\\\hline')\n print \"Label & Predicted Correctly & Predicted Incorrectly & Precision & Recall \\\\\\\\ \\\\hline\"\n for i in range(len(classes)):\n print \"{} & {} & {} & {:0.2f} & {:0.2f} \\\\\\\\ \\\\hline\".format(classes[i], matrix_true.get(classes[i], 0), matrix_false.get(classes[i], 0), precision[i], recall[i])\n print \"\\\\multicolumn{{3}}{{|l|}}{{Weighted Average}} & {:0.2f} & {:0.2f} \\\\\\\\ \\hline\".format(precision_score(true, pred, average='weighted'), recall_score(true, pred, average='weighted'))\n print('\\\\end{tabular}')\n print('\\\\end{table}')", "def __inst_status_table(self, \n\t\tinst_status, \n\t\tclock, \n\t\tcolored=True):\n\t\tprint(self.__INST_HORIZ_LINE)\n\t\tprint(\"{val:<{fill}}\".format(val=\"PC\", \n\t\t\tfill=self.__max_pc_len), end=\":\")\n\t\tfor print_label in self.__inst_print_order:\n\t\t\tprint(\"{:^{fill}}\".format(\\\n\t\t\t\tprint_label, fill=self.__inst_fill_len), end=\"|\")\n\t\tprint(\"\\n\", self.__INST_HORIZ_LINE, sep=\"\")\n\t\t\"\"\"\n\t\t\t~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t\t\tEND OF Instruction status table Header\n\t\t\t~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t\t\tSTART OF Instruction status table body\n\t\t\t~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t\t\"\"\"\n\n\t\tfor pc in sorted(list(inst_status.keys())):\n\t\t\tprint(\"{val:<{fill}}\".format(val=pc, fill=self.__max_pc_len), 
end=\":\")\n\t\t\tfor stage_id in range(len(self.__inst_print_order)):\n\t\t\t\tval=inst_status[pc][self.__inst_print_order[stage_id]]\n\t\t\t\tif colored:\n\t\t\t\t\tcolor = Fore.RED if clock != val else Fore.GREEN\n\t\t\t\t\tcolor_reseter = Style.RESET_ALL\n\t\t\t\telse:\n\t\t\t\t\tcolor = \"\"\n\t\t\t\t\tcolor_reseter = \"\"\n\n\t\t\t\tprint(color + \\\n\t\t\t\t\t\"{:^{fill}}\".format(val if val <= clock else \"\", \n\t\t\t\t\tfill=self.__inst_fill_len) + color_reseter, end=\"|\")\n\t\t\tprint()\n\t\t\"\"\"\n\t\t\t~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t\t\tEND OF Instruction status table body\n\t\t\t~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t\t\"\"\"\n\t\tprint(self.__INST_HORIZ_LINE)", "def print_all_tables(self):\n conn = self.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n print(cursor.fetchall())" ]
[ "0.71382636", "0.71104014", "0.70632297", "0.70341116", "0.6963186", "0.68770564", "0.6833327", "0.6793822", "0.6778215", "0.6727708", "0.6712857", "0.66761214", "0.654547", "0.654547", "0.65261686", "0.6415763", "0.6326378", "0.62906164", "0.6262944", "0.6261385", "0.6230227", "0.6230227", "0.62198466", "0.6197133", "0.61703444", "0.61678874", "0.6141018", "0.6139343", "0.6130137", "0.61275923", "0.6124027", "0.6119995", "0.6113043", "0.61068606", "0.6094552", "0.6081926", "0.6075888", "0.60743797", "0.6072065", "0.60649514", "0.60459465", "0.6024656", "0.6013283", "0.59855634", "0.5951864", "0.59364164", "0.59318006", "0.5910495", "0.59048504", "0.59009427", "0.5897394", "0.5892098", "0.588901", "0.58885604", "0.58661205", "0.58560526", "0.5849004", "0.5845328", "0.58251095", "0.5819344", "0.58185655", "0.581315", "0.5810659", "0.5794354", "0.575345", "0.5749533", "0.57486457", "0.5743951", "0.57014656", "0.5692004", "0.56860876", "0.56793207", "0.5671522", "0.56635666", "0.5657124", "0.5653225", "0.56529623", "0.5652373", "0.56495506", "0.56488603", "0.56450623", "0.5640161", "0.56390685", "0.56253743", "0.56175816", "0.560495", "0.55978084", "0.5596715", "0.55955404", "0.5577507", "0.55759597", "0.55742943", "0.556394", "0.55608046", "0.5547387", "0.55418485", "0.55375385", "0.5534074", "0.5533456", "0.5529976" ]
0.5767268
64
generate a list of viable coordinates for mines, and randomly choose them.
def generate_mines(self, number):
    mine_locations = []
    available_places = [[j, i] for i in xrange(0, self.x) for j in xrange(0, self.y)]
    while number > 0:
        # the chosen coordinate for a mine is appended into the list and is
        # removed from the list of choices to prevent duplicates.
        choice = random.choice(available_places)
        available_places.remove(choice)
        mine_locations.append(choice)
        number -= 1
    return mine_locations
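The method above is bound to a board object (it reads self.x and self.y) and uses Python 2's xrange, so it is not runnable in isolation. As a rough standalone sketch of the same technique — picking unique mine coordinates by repeatedly choosing from and removing entries of a candidate list — the following Python 3 version passes the board dimensions explicitly; the parameter names and the 9x9 example values are illustrative assumptions, not part of the dataset entry.

import random

def generate_mines_standalone(board_width, board_height, number):
    # Build every [row, col] candidate coordinate on the board.
    available_places = [[j, i] for i in range(board_width) for j in range(board_height)]
    mine_locations = []
    while number > 0:
        # Move a randomly chosen candidate into the result list; removing it
        # from the candidates prevents duplicate mine positions.
        choice = random.choice(available_places)
        available_places.remove(choice)
        mine_locations.append(choice)
        number -= 1
    return mine_locations

# Example usage: place 10 mines on a 9x9 board.
print(generate_mines_standalone(9, 9, 10))

random.sample(available_places, number) would give the same without-replacement behaviour in a single call.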
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_coordinates():\n return Coordinates(random.randint(0, 14), random.randint(0, 14))", "def add_mines(self):\n for x, y in sample(list(itertools.product(range(self.width), range(self.height))), self.num_mines):\n self.grid[y][x] = self.mine", "def _generate_mines(self):\r\n mines_left = self.mines\r\n while mines_left > 0:\r\n gen_row = random.randint(0, self.rows-1)\r\n gen_col = random.randint(0, self.cols-1)\r\n\r\n if not self.fields[gen_row][gen_col].mine:\r\n self.fields[gen_row][gen_col].mine = True\r\n self._increment_fields_values(gen_row, gen_col)\r\n self.mines_cords.append((gen_row, gen_col))\r\n mines_left -= 1", "def get_random_coordinates(self):\n array_shape = np.shape(self.cells) # type: tuple\n points_on_island = []\n for i in range(1, array_shape[0] - 1):\n for j in range(1, array_shape[1] - 1):\n points_on_island.append((i, j))\n random.shuffle(points_on_island)\n return points_on_island", "def gen_placecells(self, min_spread=0.2):\r\n\r\n N = None\r\n num_tries = 1000 # a limit on the number of attempts to place a new placecell\r\n\r\n # assign random x,y locations to each neuron\r\n locations = [self.random_location()]\r\n while True:\r\n # generate a random new point\r\n new_loc = self.random_location()\r\n\r\n # check that the point isn't too close to previous points\r\n count = 0\r\n while min([self.calc_dist(new_loc, l) for l in locations]) < min_spread and count < num_tries:\r\n new_loc = self.random_location()\r\n count += 1\r\n\r\n # add the new point\r\n locations += [new_loc]\r\n\r\n if (N == None and count >= num_tries) or len(locations) == N:\r\n # stop when required number of place cells built (if N specified),\r\n # or when world has been decently filled\r\n break\r\n\r\n return locations", "def generate_available_position(unavailable_positions, max_position):\n\n x = randint(0, max_position)\n y = randint(0, max_position)\n position = (x, y)\n while position in unavailable_positions:\n x = randint(0, max_position)\n y = randint(0, max_position)\n position = (x, y)\n\n return position", "def generateMines(num_rows, num_cols, num_mines):\n arr = np.random.permutation(num_rows * num_cols)\n return arr[:num_mines]", "def place_mines(board_size, num_mines):\n mines_placed = 0\n board = np.zeros((board_size, board_size), dtype=int)\n while mines_placed < num_mines:\n rnd = randint(0, board_size * board_size)\n x = int(rnd / board_size)\n y = int(rnd % board_size)\n if is_valid(x, y):\n if not is_mine(board, x, y):\n board[x, y] = MINE\n mines_placed += 1\n return board", "def random_position():\n path = (\n os.path.dirname(__file__)\n + os.sep\n + \"templates\"\n + os.sep\n + \"data\"\n + os.sep\n + \"taxi_stations.json\"\n )\n with open(path) as f:\n stations = json.load(f)[\"features\"]\n pos = random.choice(stations)\n coords = [pos[\"geometry\"][\"coordinates\"][1], pos[\"geometry\"][\"coordinates\"][0]]\n lat = float(\"{0:.6f}\".format(coords[0]))\n lng = float(\"{0:.6f}\".format(coords[1]))\n return [lat, lng]", "def random_positions(mini, maxi):\n x_cord = (maxi - mini)*np.random.random(SIZE) + mini\n y_cord = (maxi - mini)*np.random.random(SIZE) + mini\n return np.column_stack([x_cord, y_cord])", "def __get_random_hotspot(self):\n x_min = self.occupancy_map.info.origin.position.x\n x_max = x_min + self.occupancy_map.info.width * self.occupancy_map.info.resolution\n y_min = self.occupancy_map.info.origin.position.y\n y_max = y_min + self.occupancy_map.info.height * \\\n self.occupancy_map.info.resolution\n # This might bes a bit strange, but 
we have the following problem:\n # some simulators need a square version of the same map. A square version\n # will have other x_max or y_max and thus the random hotspots will be different.\n # TO prevent this, we will always take only the max value of either x_max or y_max.\n # This will be the same for the square version and the not-square version (of the same map).\n max_value = max(x_max, y_max)\n\n # search for a not occupied position\n while True:\n # previously: x = random.uniform(x_min, x_max) # see problem description above\n x = random.uniform(x_min, max_value)\n # previously: y = random.uniform(y_min, y_max) # see problem description above\n y = random.uniform(y_min, max_value)\n # due to the workaround for the problem above, it can be that the value is out\n # of map for the not square map version. We need to skip this (the square\n # map version will skip it due to occupied cell...):\n if x <= x_max and y <= y_max:\n cell_x = min(int(\n (x - x_min) / self.occupancy_map.info.resolution), self.occupancy_map.info.width - 1)\n cell_y = min(int(\n (y - y_min) / self.occupancy_map.info.resolution), self.occupancy_map.info.height - 1)\n if not self.__cell_is_occupied(cell_x, cell_y):\n break\n spread = random.uniform(0.5, 1.0)\n return (x, y, spread)", "def generator(self, random, args):\r\n locations = [i for i in range(len(self.weights))]\r\n random.shuffle(locations)\r\n return locations", "def choose_next(self, round):\n return random.choice(self.possible_coords)", "def Generate_Random( self ):\n print( 'Generating Random coordinates' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n self.Data.Stand[s].Tree[t].X = random.uniform( 0, 208.71 )\n self.Data.Stand[s].Tree[t].Y = random.uniform( 0, 208.71 )", "def generate_mine_map(width=30, height=16, num_mines=99):\n\n if num_mines > width * height:\n print(\"The number of mines exceeds the size of the board.\")\n return\n \n mine_map = [[False for i in range(width)] for j in range(height)]\n mines = 0\n while mines < num_mines:\n x = random.randint(0, width-1)\n y = random.randint(0, height-1)\n if not mine_map[y][x]:\n mine_map[y][x] = True\n mines += 1\n\n return mine_map", "def __generate_spawn_points(self):\n while True:\n p1x = random.randint(0, self.width - 1)\n p1y = random.randint(0, self.height - 1)\n p2x, p2y = self.__mirror(p1x, p1y)\n d_sq = (p1x - p2x)**2 + (p1y - p2y)**2\n if d_sq >= (self.width / 2)**2:\n break\n return (p1x, p1y), (p2x, p2y)", "def rand_start_pos(self):\n free_list = np.where(self.grid_map == self.empty_value)\n pos_idx = np.random.randint(free_list[0].shape[0])\n self.set_start_pos((free_list[0][pos_idx], free_list[1][pos_idx]))", "def random_gps_gen_from_range(s_lat,n_lat, e_lon, w_lon):\n #print(s_lat, n_lat, e_lon, w_lon)\n latitude = random.uniform(s_lat, n_lat)\n longitude = random.uniform(e_lon, w_lon)\n return latitude, longitude", "def spawn(self):\n (x_coord, y_coord) = (0, 0)\n grid_x = SCREEN_X // self.size\n grid_y = SCREEN_Y // self.size\n while x_coord < EDGE + 5 or x_coord > SCREEN_X - self.size - EDGE - 5:\n x_coord = random.randrange(grid_x) * self.size\n while y_coord < EDGE + 5 or y_coord > SCREEN_Y - self.size - EDGE - 5:\n y_coord = random.randrange(grid_y) * self.size\n return (x_coord, y_coord)", "def choose_pos(self):\n s = self\n\n availablepos = []\n for dblock in s.pjs.dblocks:\n is_available = True\n\n for powerup in s.pjs.powerups:\n if 
powerup.rects[0].overlap(dblock.rects[0]):\n is_available = False\n break\n\n if is_available:\n availablepos.append(dblock.rpos)\n\n pos = random.randint(0, len(availablepos) - 1)\n s.rpos = availablepos[pos]", "def __new_position(self):\n iterables = [range(self.size_x), range(self.size_y)]\n points = [] # Save all points in size.\n for point in itertools.product(*iterables):\n points.append(point)\n\n current_points = [] # Save used points.\n for object in self.objects:\n if (object.x, object.y) not in current_points:\n current_points.append((object.x, object.y))\n\n for point in current_points:\n points.remove(point) # Remove all used points.\n\n location = np.random.choice(a=range(len(points)), replace=False)\n return points[location]", "def random_pose(self):\n position = self._start\n while self[position].distance < np.sum(self._rooms.shape) * 2:\n position = np.array(\n [random.randrange(limit) for limit in self._rooms.shape]\n )\n direction = random.choice(self.exits(position))\n return (position, direction)", "def get_random_location(self):\n max_x, max_y, max_z, min_x, min_y, min_z = self.get_max_and_min()\n if max_x == float('-inf') and min_x == float('inf') and max_y == float('-inf') and min_y == float('inf') and \\\n max_z == float('-inf') and min_z == float('inf'):\n x = random.uniform(32, 33)\n y = random.uniform(35, 36)\n z = 0\n ans = x, y, z\n return ans\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n counter += 1\n x = random.uniform(max_x, min_x)\n y = random.uniform(max_y, min_y)\n z = random.uniform(max_z, min_z)\n if counter == 0: # means all nodes doesn't have any location\n x = random.uniform(32, 33)\n y = random.uniform(35, 36)\n z = 0\n ans = x, y, z\n else:\n ans = x, y, z\n return ans", "def _place_nodes(self, i, j, step, max_nodes):\n points = []\n for k in range(max_nodes):\n while(True):\n t = Point(random.randint(i,i+step), random.randint(j,j+step)) \n if all([point.get_distance(t) > self.min_distance for point in points]):\n points.append(t)\n break\n \n for point in points:\n n=Node(self.counter, point)\n self.nodes.append(n)\n self.counter+=1", "def list_of_positions():\n positions = []\n while len(positions) != 20:\n x = random.randrange(0, 20)\n y = random.randrange(0, 20)\n if (x, y) not in positions:\n positions.append((x, y))\n return positions", "def generate(self):\n for i in range(4):\n random_first = randomize_first_box()\n self.randomize(random_first)\n for i in range(9):\n random_pos = randomize_position()\n self.randomize(random_pos)\n self.board.solve()", "def random_coords(bounds):\n x_min, y_min, x_max, y_max = bounds\n x = np.random.randint(x_min, x_max)\n y = np.random.randint(y_min, y_max)\n return x, y", "def _generate_pores(self):\n logger.info(\"Place randomly located pores in the domain\")\n #Original Random Point Generator\n #coords = sp.rand(self._Np,3)*[self._Lx,self._Ly,self._Lz]\n #Seeding Code\n coords = np.zeros([self._Np,3])\n #reject points close to boundaries - if False there will be slightly more\n rejection = [False,False,True]\n for j in range(3):\n i = 0\n while i < self._Np:\n coord = np.random.uniform(0,1,1)\n if self._reject(coord) == rejection[j]:\n coords[i][j]=coord\n i += 1\n coords*=np.array([self._Lx,self._Ly,self._Lz])\n #Seeding Code\n #Uniform Random Generator\n #coords = np.array([np.random.uniform(0,self._Lx,self._Np),np.random.uniform(0,self._Ly,self._Np),np.random.uniform(0,self._Lz,self._Np)]).T\n\n self['pore.coords'] = coords\n logger.debug(\"End 
of method\")", "def _add_mines(self, mines_count: int, size: int, excluded: int):\n self.mines = random.sample(range(size), mines_count)\n excluded_list = [excluded]\n not_is_left = excluded % self.columns > 0\n not_is_rigth = excluded % self.columns < self.columns - 1\n if excluded // self.rows > 0:\n if not_is_left:\n excluded_list.append(excluded - self.columns - 1)\n excluded_list.append(excluded - self.columns)\n if not_is_rigth:\n excluded_list.append(excluded - self.columns + 1)\n if not_is_left:\n excluded_list.append(excluded - 1)\n if not_is_rigth:\n excluded_list.append(excluded + 1)\n if excluded // self.rows < self.rows - 1:\n if not_is_left:\n excluded_list.append(excluded + self.columns - 1)\n excluded_list.append(excluded + self.columns)\n if not_is_rigth:\n excluded_list.append(excluded + self.columns + 1)\n for el in excluded_list:\n try:\n index = self.mines.index(el)\n if index >= 0:\n new_value = random.randint(0, size - 1)\n while new_value in self.mines or new_value in excluded_list:\n new_value = random.randint(0, size - 1)\n self.mines[index] = new_value\n except ValueError:\n # index method throws ValueError exception if the value is not in the list\n pass\n for mine in self.mines:\n row = mine // self.columns\n column = mine % self.columns\n self._add_bomb(row, column)", "def create_random_points(n):\n\n\treturn [(random.randint(0,n),random.randint(0,n)) for i in range(n)]", "def make_random_move(self):\n s=set()\n for i in range(self.height):\n for j in range(self.width):\n s.add((i,j))\n\n s=s-self.mines-self.moves_made\n if s==set(): return None\n return random.choice(list(s))\n #raise NotImplementedError", "def choose_starting_points(self, side):\n # Left Side\n if side == 1:\n x = np.random.uniform(self.left_side[\"x_min\"], self.left_side[\"x_max\"])\n y = np.random.uniform(self.left_side[\"y_min\"], self.left_side[\"y_max\"])\n # Bottom\n elif side == 2:\n x = np.random.uniform(self.bottom[\"x_min\"], self.bottom[\"x_max\"])\n y = np.random.uniform(self.bottom[\"y_min\"], self.bottom[\"y_max\"])\n # Right Side\n elif side == 3:\n x = np.random.uniform(self.right_side[\"x_min\"], self.right_side[\"x_max\"])\n y = np.random.uniform(self.right_side[\"y_min\"], self.right_side[\"y_max\"])\n # Top\n elif side == 4:\n x = np.random.uniform(self.top[\"x_min\"], self.top[\"x_max\"])\n y = np.random.uniform(self.top[\"y_min\"], self.top[\"y_max\"])\n else:\n raise ValueError(\"Invalid number for sides!\")\n\n return x, y", "def implement_random(self):\n shape = set()\n for coord in INDICES:\n if randint(0, 1):\n shape.add(coord)\n self.implement_shape(shape)", "def random_minimum_neighbor(conflicts):\n np.random.seed(None)\n min_conflict = np.min(conflicts)\n min_conflict_locs = np.where(conflicts == min_conflict)[0]\n # print(\"choices: {}\".format(len(min_conflict_locs)))\n\n return np.random.choice(min_conflict_locs, 1)[0]", "def make_random_move(self):\n #raise NotImplementedError\n # Take out moves_made as well as mines detected\n self.available_cells = self.available_cells - self.moves_made - self.mines\n available_cells = self.available_cells.copy()\n\n # I'll first try and see if there's any move not within the nearby of\n # The mines, I think this can maximise survivability in some cases\n # It'll still work even if didn't do the below\n for sentence in self.knowledge:\n available_cells -= sentence.cells\n #print(sentence)\n #print(self.mines)\n\n # Making a random move\n length = len(available_cells)\n if length != 0:\n index = random.randint(0, length - 
1)\n move = list(available_cells)[index]\n self.moves_made.add(move)\n self.mark_safe(move)\n return move\n\n length = len(self.available_cells)\n if length != 0:\n index = random.randint(0, length - 1)\n move = list(self.available_cells)[index]\n self.moves_made.add(move)\n self.mark_safe(move)\n return move\n return None", "def __init__(self, height=50, width=50, mines=100):\n # Set initial width, height, and number of mines\n self.height = height\n self.width = width\n self.mines = set()\n\n # Initialize an empty field with no mines\n self.board = []\n for i in range(self.height):\n row = []\n for j in range(self.width):\n row.append(False)\n self.board.append(row)\n\n # Add mines randomly to the board\n while len(self.mines) != mines:\n i = random.randrange(height)\n j = random.randrange(width)\n if not self.board[i][j]:\n self.mines.add((i, j))\n self.board[i][j] = True\n\n # Maintain a set of mines that is found by the player\n self.mines_found = set() # initially this set is empty", "def make_random_move(self):\n #completely random move\n all_moves = set(itertools.product(range(self.height), range(self.width)))\n moves_left = list(all_moves - self.mines - self.moves_made)\n if not moves_left:\n return None\n return random.choice(moves_left)", "def init_locations():\n player, door, monster = sample(CELLS, k=3)\n\n return player, door, monster", "def pick_chosen_points(m, n):\r\n return [i * n // m + n // (2 * m) for i in range(m)]", "def get_random_coords(width, height):\n return randrange(1, width-2), randrange(1, height-2)", "def set_random_pos(self, which):\n available = [[r, c] for r, row in enumerate(self.maze)\n for c, value in enumerate(row) if value == ' ']\n choice = random.choice(available)\n if which == 'starting':\n self.current_pos = choice\n elif which == 'finishing':\n self.finish_pos = choice", "def __init__minefield__(self):\n # Creates random locations of mines according to the size of the game board.\n mines = random.sample(range(0, self.rows * self.cols), self.mines)\n \n # Uses a helper method to initialize tile categories: mine or zero.\n return [[Tiles(i, j, self.create_tile(mines, i, j)) for j in range(self.cols)] for i in range(self.rows)]", "def __init__(self, boardDimensions, shipsAfloat): \r\n ShotSelector.__init__(self, boardDimensions, shipsAfloat)\r\n self.remainingCoordinates = [Coordinates(i, j) for i in range(self.boardDimensions) for j in range(self.boardDimensions)]\r\n random.shuffle(self.remainingCoordinates)", "def get_random_empty_location(player_x, player_y, min_distance_from_player):\n empty_locations = []\n for row in range(height):\n for column in range(width):\n if False: # TODO: Replace False with condition that checks that location (column, row) is empty and at least min_distance_from_player squares from the player.\n # TODO: Add the empty location tuple to the list of empty locations.\n pass\n # TODO: If the list of empty locations is empty, return None.\n # TODO: Otherwise, return one of the location tuples from the list.", "def rand_coord(n):\n\n x = random.randint(0, n - 1)\n y = random.randint(0, n - 1)\n return x, y", "def RandomCoordinate(): \r\n return ReturnRounded(np.random.uniform(-10,10))", "def _random_points_3d(self, number_of_seeds, min_z, max_z):\n # Sanity check. 
We can't get more seeds than what's available in the bounds\n assert number_of_seeds <= self.cell_count\n\n found = {}\n while len(found) < number_of_seeds:\n pt = Point2D(random.randint(self._lower_left.x, self._upper_right.x),\n random.randint(self._lower_left.y, self._upper_right.y))\n if pt not in found: # make sure unique\n found[pt] = random.randint(min_z, max_z)\n return [Point3D(pt.x, pt.y, z) for pt, z in found.items()]", "def __init__(self, num_literals):\n self.num_literals = num_literals\n self.position = [randint(0,1) for x in range(num_literals)]\n self.best = self.position\n #initial velocities from -1 to 1\n self.velocity = [2*random()-1 for x in range(num_literals)]\n self.fitness = float(\"-inf\")", "def get_random_point(self):\n\t\tx = np.random.uniform(self.xmin, self.xmax)\n\t\ty = np.random.uniform(self.ymin, self.ymax)\n\t\treturn [x, y, 0.0]", "def random_ship(self):\n l4 = 4\n l3 = 3\n l2 = 2\n l1 = 1\n for i in range(l4):\n row = random.randint(0,9)\n col = random.randint(0,9)\n lst = [row, col]\n self.small_ships.append(lst)\n for i in range(l3):\n row = random.randint(0,8)\n col = random.randint(0,8)\n lst = [row, col]\n lst_1 = [row, col+1]\n if lst in self.small_ships or lst in self.mid_ship:\n if lst_1 in self.small_ships or lst_1 in self.mid_ship:\n l3+=1\n else:\n self.mid_ship.append(lst)\n self.mid_ship.append(lst_1)\n for i in range(l2):\n row = random.randint(0,7)\n col = random.randint(0,7)\n lst = [row, col]\n lst_1 = [row, col+1]\n lst_2 = [row, col+2]\n if lst in self.small_ships or lst in self.mid_ship or lst in self.l_ship:\n if lst_1 in self.small_ships or lst_1 in self.mid_ship or lst_1 in self.l_ship:\n if lst_2 in self.small_ships or lst_2 in self.mid_ship or lst_2 in self.l_ship:\n l2+=1\n else:\n self.l_ship.append(lst)\n self.l_ship.append(lst_1)\n self.l_ship.append(lst_2)\n for i in range(l1):\n row = random.randint(0,6)\n col = random.randint(0,6)\n lst = [row, col]\n lst_1 = [row, col+1]\n lst_2 = [row, col+2]\n lst_3 = [row, col+3]\n if lst in self.small_ships or lst in self.mid_ship or lst in self.l_ship or lst in self.xl_ship:\n if lst_1 in self.small_ships or lst_1 in self.mid_ship or lst_1 in self.l_ship or lst in self.xl_ship:\n if lst_2 in self.small_ships or lst_2 in self.mid_ship or lst_2 in self.l_ship or lst in self.xl_ship:\n if lst_3 in self.small_ships or lst_3 in self.mid_ship or lst_3 in self.l_ship or lst in self.xl_ship:\n l1+=1\n else:\n self.xl_ship.append(lst)\n self.xl_ship.append(lst_1)\n self.xl_ship.append(lst_2)\n self.xl_ship.append(lst_3)\n lst_ship = [self.small_ships, self.mid_ship, self.l_ship, self.xl_ship]\n return lst_ship", "def mutate(w, h, mines, p):\r\n for i in range(w*h):\r\n if uniform(0, 1) <= p:\r\n #mutate:\r\n if i in mines:\r\n mines.remove(i)\r\n else:\r\n mines.append(i)\r\n return mines", "def exploring_starts(self):\n def random_choice(l): return l[np.random.randint(len(l))]\n return map(random_choice, (self.env.states, self.env.moves))", "def generatePiece(self):\n\n empty_tiles = []\n for y in range(BOARD_SIZE):\n for x in range(BOARD_SIZE):\n if self.grid[x][y].isEmpty():\n empty_tiles.append(self.grid[x][y])\n\n two_or_four = random.choice([2, 4])\n random.choice(empty_tiles).set(two_or_four)", "def generate_random_points_in_polygon(num_of_points, polygon) -> list:\n list_of_points = []\n min_x, min_y, max_x, max_y = polygon.bounds\n counter = 0\n while counter < num_of_points:\n point = Point(random.uniform(min_x, max_x), random.uniform(min_y, max_y))\n if polygon.contains(point):\n 
list_of_points.append(point)\n counter += 1\n return list_of_points", "def mutate_point_poly(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 3: seed = 0\n if seed == 0:\n insert_point(mutated_genome,index)\n elif seed == 1:\n remove_point(mutated_genome,index)\n elif seed == 2:\n switch_points(mutated_genome,index)\n elif seed == 3:\n shuffle_points(mutated_genome,index)\n elif seed == 4:\n move_point(mutated_genome,index)\n elif seed == 5:\n shift_point(mutated_genome,index)\n elif seed == 6:\n increment_point(mutated_genome,index)\n else: #seed == 7:\n decrement_point(mutated_genome,index)", "def asteroidCreator(numCorner,win):\n\n xCoor = []\n yCoor = []\n\n # Creating coordinates of the points\n coorRange = [i for i in range(-10,10) if i not in [0]] # to avoid 0\n\n for i in range(numCorner):\n xCoor.append(round(random.choice(coorRange)*random.uniform(0.01,1),2))\n yCoor.append(round(random.choice(coorRange)*random.uniform(0.01,1),2))\n\n # Sorting the coordinates\n bubbleSort(xCoor,len(xCoor))\n bubbleSort(yCoor,len(yCoor))\n\n\n # Isolating the extreme points\n xSmallest = xCoor.pop(0)\n xLargest = xCoor.pop()\n\n ySmallest = yCoor.pop(0)\n yLargest = yCoor.pop()\n\n # Shuffle the coordinates\n random.shuffle(xCoor)\n random.shuffle(yCoor)\n\n # Divide them into two sets\n xCoorLower = xCoor[:len(xCoor)//2]\n xCoorUpper = xCoor[len(xCoor)//2:]\n\n yCoorLower = yCoor[:len(yCoor)//2]\n yCoorUpper = yCoor[len(yCoor)//2:]\n\n # Append back the extreme points, and sort them again\n xCoorLower.append(xSmallest)\n xCoorLower.append(xLargest)\n xCoorUpper.append(xSmallest)\n xCoorUpper.append(xLargest)\n\n yCoorLower.append(ySmallest)\n yCoorLower.append(yLargest)\n yCoorUpper.append(ySmallest)\n yCoorUpper.append(yLargest)\n\n bubbleSort(xCoorLower,len(xCoorLower))\n bubbleSort(xCoorUpper,len(xCoorUpper))\n bubbleSort(yCoorLower,len(yCoorLower))\n bubbleSort(yCoorUpper,len(yCoorUpper))\n\n # Getting the vector lengths out of the points\n # We will get vectors in 4 directions from 4 lists\n xVectorLengths = []\n yVectorLengths = []\n\n for i in range(len(xCoorLower)-1):\n xVectorLengths.append(xCoorLower[i]-xCoorLower[i+1])\n for i in range(len(xCoorUpper)-1):\n xVectorLengths.append(xCoorUpper[i+1]-xCoorUpper[i])\n for i in range(len(yCoorLower)-1):\n yVectorLengths.append(yCoorLower[i]-yCoorLower[i+1])\n for i in range(len(yCoorUpper)-1):\n yVectorLengths.append(yCoorUpper[i+1]-yCoorUpper[i])\n\n random.shuffle(xVectorLengths)\n random.shuffle(yVectorLengths)\n\n # Creating the vectors\n vectors = []\n defaultVector = [0,0]\n\n for i in range(len(xVectorLengths)):\n defaultVector[0] = round(xVectorLengths[i],2)\n defaultVector[1] = round(yVectorLengths[i],2)\n vectors.append(defaultVector.copy())\n\n # Sorting vectors by their angle\n sortedVectors = []\n quadrant1 = []\n quadrant2 = []\n quadrant3 = []\n quadrant4 = []\n\n ### Dividing them by quadrants\n for vector in vectors:\n if vector[0] >= 0 and vector[1] >= 0:\n quadrant1.append(vector)\n elif vector[0] <= 0 and vector[1] >= 0:\n quadrant2.append(vector)\n elif vector[0] <= 0 and vector[1] <= 0:\n quadrant3.append(vector)\n elif vector[0] >= 0 and vector[1] <= 0:\n quadrant4.append(vector)\n\n ### Sorting them inside the quadrants\n quadrant1 = angleSort(quadrant1,1,len(quadrant1))\n quadrant2 = angleSort(quadrant2,2,len(quadrant2))\n quadrant3 = angleSort(quadrant3,3,len(quadrant3))\n quadrant4 = angleSort(quadrant4,4,len(quadrant4))\n\n 
### Adding them up in order\n for vector in quadrant1:\n sortedVectors.append(vector)\n for vector in quadrant2:\n sortedVectors.append(vector)\n for vector in quadrant3:\n sortedVectors.append(vector)\n for vector in quadrant4:\n sortedVectors.append(vector)\n\n # Creating the points for the polygon\n points = []\n points = vectorsToPoints(sortedVectors,points)\n\n rightEdge = 0\n leftEdge = 0\n upperEdge = 0\n lowerEdge = 0\n\n # getting the boundaries for the asteroid\n for point in points:\n if point[0] > rightEdge:\n rightEdge = point[0]\n elif point[0] < leftEdge:\n leftEdge = point[0]\n if point[1] > upperEdge:\n upperEdge = point[1]\n elif point[1] < lowerEdge:\n lowerEdge = point[1]\n\n # Width and height are only required since it is a child of rotating_block class\n width = rightEdge - leftEdge\n height = upperEdge - lowerEdge\n\n centerPoint = [(rightEdge + leftEdge) / 2 , (upperEdge + lowerEdge) / 2]\n\n asteroid = pho.Asteroid(win,width,height,points,centerPoint[0],centerPoint[1])\n\n return asteroid", "def generate_valid_coordinates(radius, dist_apart):\n\n vtx_x = random.randrange(dist_apart, int(WINDOW_WIDTH - radius), dist_apart);\n vtx_y = random.randrange(dist_apart, int(WINDOW_HEIGHT), dist_apart);\n\n count = 0\n while any((abs(vtx[\"x\"] - vtx_x) <= dist_apart) for vtx in VERTICES) and count < 1000:\n vtx_x = random.randrange(dist_apart, int(WINDOW_WIDTH - dist_apart), dist_apart);\n count += 1\n\n count = 0\n while any((abs(vtx[\"y\"] - vtx_y) <= dist_apart) for vtx in VERTICES) and count < 1000:\n vtx_y = random.randrange(dist_apart, int(WINDOW_HEIGHT), dist_apart);\n count += 1\n return vtx_x, vtx_y", "def get_mines(self):\n\t\treturn ((x, y) for x in range(self.width)\n\t\t for y in range(self.height) if self.mines[x][y])", "def shuffle_pos(self, ):\n x, y = 0, 0\n while self.maze.structure[int(y / 40)][int(x / 40)] != \"0\" \\\n or (x, y) in self.forbiden_tulpes:\n x = random.randint(0, 14) * sprite_size\n y = random.randint(0, 14) * sprite_size\n self.forbiden_tulpes.append((x, y))\n return x, y", "def spawn_start_goal(grid, spawn_seed=None):\n\n xs, ys = np.where(grid == 0)\n free_positions = list(zip(xs, ys))\n\n start, goal = random.Random(spawn_seed).sample(free_positions, 2)\n\n return start, goal", "def randVacantPoint(L):\n pliste = vacantPoint(L)\n\n return pliste[random.randint(0, len(pliste)-1)]", "def get_random_pos(self):\n i = np.random.randint(self.n)\n j = np.random.randint(self.m)\n return [i, j]", "def make_random_move(self):\n choice = None\n options = []\n #generate full moves list\n for i in range(self.width):\n for j in range(self.height):\n #make sure move has not been made\n if (i,j) not in self.moves_made:\n #make sure move is not a mine\n if (i,j) not in self.mines:\n options.append((i,j))\n #if there are no options, return None\n if len(options) == 0:\n return None\n\n #pick a random option from generated list\n choice = random.choice(options)\n return choice\n\n \"\"\"\n For kicks and giggles I wrote this extra bit to determine a\n rough intuitive probability for each option based on the knowledge\n base, so rather than picking a choice randomly the AI can choose\n the option that is, at least intuitively, least likely to blow up.\n Better to take the 1/8 chance than the 1/3 chance, right?\n \"\"\"\n best_chance = 1\n #iterate through generated options\n for option in options:\n #Could set chance to 1/8, but the AI wouldn't actually know that. 
I\n #only know it because I can read the code...But for the purposes of this\n #drill we'll say the AI doesn't know how many bombs are placed.\n #Better then to pick a square we know nothing about than one that\n #has a 1/8 chance of exploding. Gather more information that way.\n chance = 0\n for sentence in self.knowledge:\n #look to see if current option is in sentences\n if option in sentence.cells:\n #use sentence count and length of cell set to calculate probability\n prob = sentence.count / len(sentence.cells)\n if prob > chance:\n #Looking for the highest explosive probability for this square\n chance = prob\n if chance < best_chance:\n #If this option has lower odds of exploding than current best, it becomes\n #the optimal\n best_chance = chance\n choice = option\n\n #return choice", "def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)", "def get_random_position_near_path(\n game: TowerDefenceSolver,\n cov_xx: int,\n cov_yy: int,\n purchased_towers: Purchases,\n max_number_of_tries: Optional[int] = None,\n) -> Optional[Tuple[int, int]]:\n position = tuple(\n np.round(\n np.random.multivariate_normal(game.path[np.random.choice(len(game.path))], cov=[[cov_xx, 0], [0, cov_yy]])\n ).astype(int)\n )\n\n number_of_tries = 0\n while not validate_pos(game, position, purchased_towers):\n position = tuple(\n np.round(\n np.random.multivariate_normal(\n game.path[np.random.choice(len(game.path))], cov=[[cov_xx, 0], [0, cov_yy]]\n )\n ).astype(int)\n )\n number_of_tries += 1\n if max_number_of_tries and number_of_tries > max_number_of_tries:\n return None\n\n return position", "def get_grid_coords(self, count, boundry_x, boundry_y, grid_size):\n\n coords = []\n\n boundry_x = int(boundry_x/10)\n boundry_y = int(boundry_y/10)\n\n while len(coords) < count:\n seed()\n\n\n x = randint(-boundry_x, boundry_x)\n y = randint(-boundry_y, boundry_y)\n\n if len(coords) == 0:\n coords.append((x*grid_size, y*grid_size))\n else:\n for coord in coords:\n if (x not in range(coord[0]-buffer*grid_size, coord[0]+buffer*grid_size)) and (y not in range(coord[1]-buffer, coord[1]+buffer)):\n pass\n else:\n break", "def _get_random_position(self):\n return (random.randrange(0, self.maze.width),\n random.randrange(0, self.maze.height))", "def get_random_solution(self, rand_sol_rng):\n x = tuple([rand_sol_rng.randint(0, self.model.factors[\"num_rooms\"]) for _ in range(self.dim)])\n return x", "def place_allowed_tower_sites():\n self.coordinates__tower_sites = []\n for tk in xrange(self.N_tower_kinds):\n #Each kind of tower will have the correct number of sites placed\n \n coords = []\n while len(coords)<self.N_tower_sites[tk]:\n x = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[1]+1-self.BORDER_MARGIN,size=1)[0]\n y = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[0]+1-self.BORDER_MARGIN,size=1)[0]\n p = (x,y) \n all_valid = True\n for rect in self.coordinates__obstacles:\n if not check_valid_placement(p,rect):\n all_valid = False\n break\n if all_valid:\n coords.append(p)\n self.coordinates__tower_sites.append(coords)", "def random_location(self):\n return random.choice(self.locations_list)", "def getRandomCoordinates( self, size ):\n if not self.mIsLoaded: self.__loadIndex()\n\n token = random.choice( self.mIndex.keys() ) \n strand = random.choice( (\"+\", \"-\") )\n pos_id, pos_seq, lcontig = self.mIndex[token][:3]\n rpos = random.randint( 0, lcontig )\n if random.choice( (\"True\", \"False\") ):\n start = rpos\n end = min(rpos 
+ size, lcontig)\n else:\n start = max(0, rpos - size)\n end = rpos\n \n return token, strand, start, end", "def add_points(grid, num_points):\n \n for i in range(num_points):\n # Coord for crit point\n rand_x = random.randint(0, GRID_WIDTH - 1)\n rand_y = random.randint(0, GRID_HEIGHT - 1)\n \n # Set value of crit point\n elev = (MAX_HEIGHT - MIN_HEIGHT) * random.random() + MIN_HEIGHT\n grid[rand_x][rand_y] = elev * PEAK_HEIGHT\n \n return grid", "def random_position(self):\n\t\treturn (random.randint(1, self.max_x-2), random.randint(1,self.max_y-2))", "def put_items(self,*maplist):\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))\n\n while maplist[self.position_y][self.position_x] == \"x\":\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))", "def candidate_start_points_random(bounds, n_candidates=1000,\n random_state=None):\n generator = check_random_state(random_state)\n\n low, high = zip(*bounds)\n n_dims = len(bounds)\n return generator.uniform(low, high, (n_candidates, n_dims)).transpose()", "def __sample(self):\n # xvals are \"east\" vals and yvals are \"north\" vals on the map\n xvals = np.random.uniform(self._xmin, self._xmax, self._num_samples)\n yvals = np.random.uniform(self._ymin, self._ymax, self._num_samples)\n if self._target_altitude is None:\n zvals = np.random.uniform(self._zmin, self._zmax, self._num_samples)\n else:\n zvals = np.full(self._num_samples, self._target_altitude, dtype=float)\n \n samples = list(zip(xvals, yvals, zvals))\n\n pts = []\n for s in samples:\n in_collision = False\n idxs = list(self._obstacles_tree.query_radius(\n np.array([s[0], s[1]]).reshape(1, -1), r=self._max_poly_xy)[0])\n \n if len(idxs) > 0:\n for ind in idxs: \n p = self._polygons[int(ind)]\n if p.contains(s) and p.height >= s[2]:\n in_collision = True\n\n if not in_collision:\n pts.append(s)\n \n return pts", "def _generateObjectPositions(self, num=1, retry=100, radius_scale=1.0, radius_offset=1.0, angle_scale=1.0, angle_offset=0.5*np.pi, z=0.5, near_distance=1.0):\n def genPos():\n r = radius_scale * self.np_random.rand() + radius_offset\n a = -np.pi * angle_scale + angle_offset\n b = np.pi * angle_scale + angle_offset\n ang = (b - a) * self.np_random.rand() + a\n return np.array([r * np.sin(ang), r * np.cos(ang), z])\n def isNear(pos, poss):\n for p, o in poss:\n if np.linalg.norm(p - pos) < near_distance:\n return True\n return False\n def genPosRetry(poss):\n for i in range(retry):\n pos = genPos()\n if not isNear(pos, poss):\n return pos\n return genPos()\n poss = []\n for i in range(num):\n pos = genPosRetry(poss)\n orn = p.getQuaternionFromEuler([0.0, 0.0, 2.0*np.pi*self.np_random.rand()])\n poss.append((pos, orn))\n self.np_random.shuffle(poss)\n return poss", "def randomposition(self, identifier):\n # Initialize a validation to make sure we only generate one of each item\n onmap = False\n\n # Loop checking if a random spot is free\n # on the 15*15 squares map with the ' ' character\n while not onmap:\n self.square_x = random.randint(0, 14)\n self.square_y = random.randint(0, 14)\n # if the sprite is not a wall or another item\n if self.maze.structure[self.square_y][self.square_x] == ' ':\n # then we add the item identifier to the map like 'b' for bottle\n self.maze.structure[self.square_y][self.square_x] = identifier\n # then we exit the loop by moving the validation to True and making sure\n # we do not exceed the maximum amount of 1 
item\n onmap = True\n return self.square_x, self.square_y", "def possibleMovements(self,numIterations:int=50)->list[tuple]:\n x=random.randint(0,self._side-1); y=random.randint(0,self._side-1)\n possible_positions=[]\n positionsCovered=[(x,y)]\n for _ in range(numIterations):\n if x+2<self._side and y+1<self._side:\n possible_positions.append((x+2,y+1))\n \n if x+2<self._side and y-1<self._side and y-1>0:\n possible_positions.append((x+2,y-1))\n \n if x-2<self._side and y+1<self._side and x-2>0:\n possible_positions.append((x-2,y+1))\n \n if x-2<self._side and y-1<self._side and x-2>0 and y-1>0:\n possible_positions.append((x-2,y-1)) \n\n if x+1<self._side and y+2<self._side:\n possible_positions.append((x+1,y+2))\n \n if x+1<self._side and y-2<self._side and y-1>0:\n possible_positions.append((x+1,y-2))\n\n if x-1<self._side and y+2<self._side and x-1>0:\n possible_positions.append((x-1,y+2))\n \n if x-1<self._side and y-2<self._side and x-1>0 and y-2>0:\n possible_positions.append((x-1,y-2))\n\n newX,newY=random.choice(possible_positions) #choose randomly among the possible positions,and then repeat this \n x,y=newX,newY\n positionsCovered.append((newX,newY)) \n\n return positionsCovered", "def randomized_prims(width=16, height=16) -> Maze:\n maze = Maze(width=width, height=height, algorithm=None)\n visited = [[False for _ in range(maze.width)] for _ in range(maze.height)]\n\n # ensure only one entrance to the center squares\n centerx = maze.width // 2 - 1\n centery = maze.height // 2 - 1\n \n visited[centery][centerx] = True\n visited[centery][centerx+1] = True\n visited[centery+1][centerx+1] = False\n visited[centery+1][centerx] = True\n\n visited[0][0] = True\n boundary = [(0,0,Compass.EAST), (0,0,Compass.SOUTH)]\n\n while boundary:\n x, y, direction = boundary.pop(random.randint(0, len(boundary)-1))\n nx, ny = maze.neighbor(x, y, direction)\n if not visited[ny][nx]:\n maze.break_wall(x, y, direction)\n boundary.extend([(nx,ny,direction) for direction in maze.neighbors(nx, ny)])\n visited[ny][nx] = True\n \n return maze", "def get_random_points(N): \n x1 = np.random.uniform(-1,1,N)\n x2 = np.random.uniform(-1,1,N)\n return (x1,x2)", "def SetRandomInitialPoints(self, min=None, max=None):\n raise NotImplementedError, \"must be overwritten...\"", "def get_spawn_locs(n: int, spawnbox: Optional[str] = None) -> np.ndarray:\r\n if spawnbox is None:\r\n spawnbox = cng.SPAWNBOX_OBJ\r\n\r\n box = bpy.data.objects[spawnbox]\r\n loc = np.array(box.location) # Center location\r\n scale = np.array(box.scale)\r\n\r\n points = np.random.uniform(low=-scale, high=scale, size=(n, 3)) + loc\r\n return points", "def generate_random_point(xmin,xmax,ymin,ymax):\n\tnp.random.seed()\n\tx_rand = np.random.uniform(xmin,xmax)\n\ty_rand = np.random.uniform(ymin,ymax)\n\treturn(x_rand,y_rand)", "def random_location(x_lower = 0, x_upper = 100, y_lower = 0, y_upper = 100):\n x = random.randint(x_lower, x_upper - 1)\n y = random.randint(y_lower, y_upper - 1)\n return (x, y)", "def generate(nb, vmax, valid, col):\n q = [];\n maxi = vmax\n for i in range(nb - 1):\n # au moins 5 et pas plus de la moitié des ressources\n _ = random.choice(range(5, maxi // 2))\n maxi -= _\n q.append(_)\n q.append(maxi)\n p = [random.choice(range(5, vmax - 5)) for _ in range(nb)]\n l = random.sample(valid, nb)\n return [((loc // col, loc % col), qte, prix)\n for loc, qte, prix in zip(l, q, p)]", "def generate_random_points(\n start: Float,\n end: Float,\n limit: Integer\n) -> List[Point]:\n\n return [\n Point(x=random.uniform(start, end), 
y=random.uniform(start, end))\n for _ in range(limit)\n ]", "def _add_mines(self):\n num = 0\n while num < self._n:\n x = random.randint(0, self._dim - 1)\n y = random.randint(0, self._dim - 1)\n if self._board[x][y] != -1:\n self._board[x][y] = -1\n neighbors = self._get_neighbors((x, y))\n for neighbor in neighbors:\n if self._board[neighbor[0]][neighbor[1]] != -1:\n self._board[neighbor[0]][neighbor[1]] += 1\n num += 1", "def _random2min_max(points):\n x_max = max([x for x, y in points])\n x_min = min([x for x, y in points])\n y_max = max([y for x, y in points])\n y_min = min([y for x, y in points])\n return np.array([x_min, y_min, x_max, y_max])", "def get_hit_points(min, max):\n return random.randint(min, max)", "def pickMinRandomly(a):\n listMin = []\n m = np.min(a)\n for i,e in enumerate(a):\n if e == m:\n listMin.append(i)\n \n #~ print(m)\n #~ listMin = np.where(a==m)[0]\n return listMin[random.randint(0,len(listMin)-1)]", "def generate_fake_pos(self):\n # 0. 104 binary planes of length 64\n planes = [\n np.random.randint(2, size=64).tolist() for plane in range(104)\n ]\n\n # 1. generate the other integer data\n integer = np.zeros(7, dtype=np.int32)\n for i in range(5):\n integer[i] = np.random.randint(2)\n integer[5] = np.random.randint(100)\n\n # 2. 1858 probs\n probs = np.random.randint(9, size=1858, dtype=np.int32)\n\n # 3. And a winner: 1, 0, -1\n winner = np.random.randint(3) - 1\n\n # 4. evaluation after search\n best_q = np.random.uniform(-1, 1)\n best_d = np.random.uniform(0, 1 - np.abs(best_q))\n return (planes, integer, probs, winner, best_q, best_d)", "def _init_random_coord(self):\n x_coord = \\\n random.randrange(Screen.SCREEN_MIN_X, Screen.SCREEN_MAX_X)\n y_coord = \\\n random.randrange(Screen.SCREEN_MIN_Y, Screen.SCREEN_MAX_Y)\n self.x_coord = x_coord\n self.y_coord = y_coord", "def generate_pokemons(self, grid_size):\n cell_count = grid_size ** 2\n pokemon_locations = ()\n for _ in range(self._num_pokemon):\n if len(pokemon_locations) >= cell_count:\n break\n index = random.randint(0, cell_count - 1)\n\n while index in pokemon_locations:\n index = random.randint(0, cell_count - 1)\n\n pokemon_locations += (index,)\n return pokemon_locations", "def _select(self):\n sel = []\n\n # choose randomly while favouring fit individuals\n lp = len(self.population) // 2\n for _ in range(lp):\n idx1 = idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n while idx1 == idx2:\n idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n\n p1, p2 = self.population[idx1], self.population[idx2]\n sel.append((p1, p2))\n\n return sel", "def deploy_mines(row, col):\r\n num_deployed = 0\r\n while num_deployed <= num_mines:\r\n r, c = randint(0, height-1), randint(0, width-1)\r\n if (r == row and c == col) or mines[r][c] == 1:\r\n continue\r\n mines[r][c] = 1\r\n num_deployed += 1", "def get_random_position(self):\n if self._geometry_type in ['area', 'circle']:\n geo = self.get_geometry()\n min_x, min_y, max_x, max_y = geo.bounds\n pnt = Point(\n random.uniform(min_x, max_x), \n random.uniform(min_y, max_y))\n while not geo.contains(pnt):\n pnt = Point(\n random.uniform(min_x, max_x), \n random.uniform(min_y, max_y))\n return pnt\n else:\n return None", "def cell_sample(mask, samplingPoints):\n maskedArea = np.array(np.where(mask)).T\n maskedAreaLength = len(maskedArea)\n randomIndex = sp.random.randint(0, maskedAreaLength, samplingPoints)\n coordsRandom = maskedArea[randomIndex] + sp.rand(samplingPoints, 2)\n return(coordsRandom)", "def mutate_point_poly3(mutated_genome):\n seed = 
random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 1: seed = 0\n if seed == 0:\n insert_point(mutated_genome,index)\n elif seed == 1:\n remove_point(mutated_genome,index)\n elif seed == 2:\n switch_points(mutated_genome,index)\n elif seed == 3:\n shuffle_points(mutated_genome,index)\n elif seed == 4:\n move_point(mutated_genome,index)\n elif seed == 5:\n shift_point(mutated_genome,index)\n elif seed == 6:\n increment_point(mutated_genome,index)\n else: #seed == 7:\n decrement_point(mutated_genome,index)", "def nonuniform_bounds_mutation(random, candidate, args):\n lower_bound = args.get('lower_bound')\n upper_bound = args.get('upper_bound')\n strength = args.setdefault('mutation_strength', 1)\n mutant = copy(candidate)\n for i, (c, lo, hi) in enumerate(zip(candidate, lower_bound, upper_bound)):\n if random.random() <= 0.5:\n new_value = c + (hi - c) * (1.0 - random.random() ** strength)\n else:\n new_value = c - (c - lo) * (1.0 - random.random() ** strength)\n mutant[i] = new_value\n\n return mutant" ]
[ "0.6889577", "0.68553376", "0.6830914", "0.6830659", "0.66476494", "0.64785975", "0.64664406", "0.646031", "0.6445049", "0.6439096", "0.6436462", "0.64131314", "0.6397925", "0.6376756", "0.6361325", "0.6299357", "0.62326545", "0.6220271", "0.6188136", "0.61843365", "0.615677", "0.6156487", "0.61557424", "0.6139877", "0.6122892", "0.61216825", "0.61072123", "0.6080504", "0.6072965", "0.6072531", "0.6062726", "0.6030199", "0.60162127", "0.6007755", "0.60063505", "0.59984374", "0.5994811", "0.5989608", "0.5972204", "0.595645", "0.59511626", "0.5937878", "0.5929312", "0.59247565", "0.5914606", "0.5912904", "0.59074444", "0.59060806", "0.5904662", "0.5896044", "0.58939856", "0.5889955", "0.5864477", "0.58643913", "0.5863207", "0.58609325", "0.5855151", "0.5854027", "0.58448654", "0.58398104", "0.5837808", "0.58304965", "0.5829821", "0.5829475", "0.58233726", "0.5780713", "0.577181", "0.57676363", "0.5764083", "0.5762277", "0.57495373", "0.5735551", "0.5725909", "0.5725404", "0.5724352", "0.5719476", "0.5714688", "0.57123077", "0.5703651", "0.5701765", "0.56915724", "0.56761175", "0.5675022", "0.56676733", "0.5662087", "0.5659737", "0.56552684", "0.5649564", "0.56460714", "0.5643653", "0.564326", "0.56404823", "0.56376016", "0.563589", "0.5633474", "0.563322", "0.56304485", "0.5623805", "0.56209064", "0.5618059" ]
0.7681125
0
populate answer table with numbers and mines
def get_neighbour(self, y, x):
    if [y, x] in self.mine_locations:
        return Minesweeper.BOMB
    count = 0
    # (x-1, y-1), (x, y-1), (x+1, y-1),
    # (x-1, y),   (x, y),   (x+1, y),
    # (x-1, y+1), (x, y+1), (x+1, y+1)
    for xe in range(x - 1, x + 2):
        for ye in range(y - 1, y + 2):
            if [ye, xe] in self.mine_locations:
                count += 1
    return str(count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(num_q: int, point_list: List[int])-> int:\n dp_table", "def __init__(self, height, width, mines):\n self.x = int(width)\n self.y = int(height)\n self.table_state = [\n ['-' for i in xrange(0, self.x)] for j in xrange(0, self.y)]\n self.mine_locations = self.generate_mines(int(mines))\n self.final_table = self.generate_answer()", "def solution(self):\n return [(\"simple 1\", 1.),\n (\"simple 2\", 1.),\n (\"simple 3\", 1.),\n (\"simple 4\", 1.),\n (\"simple 5\", 1.),\n (\"simple 10\", 1.),\n (\"simple 15\", 1.),\n (\"thai 1\", 1.),\n (\"thai 2\", 1.),\n (\"thai 3\", 1.),\n (\"thai 4\", 1.),\n (\"thai 5\", 1.),\n (\"thai 10\", 1.),\n (\"thai 15\", 1.),\n ]", "def generate_acceptable_answers(correct_answer):\n correct_answer = convert_to_scientific_notation(correct_answer)\n number, exponent = correct_answer.split(\"e\")\n number = float(number)\n max_number = number + .01\n min_number = number - .01\n max_number = str(max_number) + \"e\" + exponent\n min_number = str(min_number) + \"e\" + exponent\n\n min_number = convert_to_scientific_notation(float(min_number))\n correct_answer = convert_to_scientific_notation(float(correct_answer))\n max_number = convert_to_scientific_notation(float(max_number))\n\n return [min_number, correct_answer, max_number]", "def parse_scores(option, table_num, language, min_score, max_score):\n html = query_by_lang(BASE_URL + '/' + option, language)\n table = html.find_all(\"table\")[table_num]\n freq_list = table.find_all(\"tr\")[2:]\n\n # Create the dictionary\n list_min = find_min(freq_list)\n list_max = find_max(freq_list)\n return create_dict(freq_list, list_min, list_max, min_score, max_score)", "def expand_numbers(self):\n question = self\n magnitudes = [\n ['thousand', '000'],\n ['million', '000000'],\n ['billion', '000000000'],\n ['trillion', '000000000000'],\n ['quadrillion', '000000000000000'],\n ['quintillion', '000000000000000000'],\n ['sextillion', '000000000000000000000'],\n ['septillion', '000000000000000000000000'],\n ['octillion', '000000000000000000000000000'],\n ['nonillion', '000000000000000000000000000000'],\n ['decillion', '000000000000000000000000000000000']\n ]\n for magnitude, zeroes in magnitudes:\n pattern = re.compile('[0-9]+ ?'+magnitude)\n for match in re.finditer(pattern, question):\n match = match.group(0)\n num = re.search('[0-9]+', match).group(0)\n question = re.sub(re.escape(match), num+zeroes, question)\n return Qa3Question(question)", "def solution(self):\n return [(\"the\", 1561900)] * 100", "def tabulate_score(self):\n # Generate the bit formatting string\n n_bits = int(np.ceil(np.log2(self.max_ans)))\n fmt_str = \"{0:0\"+str(n_bits)+\"b}\"\n \n # Convert answers to int\n self.answers = self.answers.astype(np.int64)\n\n # Convert each element in answers to a bit string\n bits = [fmt_str.format(ans) for ans in self.answers]\n bit_string = \"\".join(bits)\n \n # Convert the combined bit string back to an integer\n seed = int(bit_string, 2)\n\n return seed", "def score_student_answer(self,question_type,question_data,student_answer):\n\t\treturn (0.0,\"\")", "def create_numbers_table():\n work_tuples = parse_columns()\n print('\\n\\n\\n ----- Tableau récapitulatif -----')\n print('-----------------------')\n for ii in work_tuples:\n line = '|'\n for ij in ii:\n line += ' ij |'\n print(line)\n print('-----------------------')", "def show_answer(self) -> None:\n for _, cell in self.cells.items():\n cell.set_answer()", "def get_table(self):\n \n # During testing, this'll speed the process update\n \n row = 0\n while row 
< 9:\n sudoku_row = input(\"Please enter the contents of row {}, using 0 to represent blanks:\".format(row+1))\n if len(sudoku_row) == 9:\n column = 0\n while column < 9:\n number_in_box = int(sudoku_row[column])\n self.table[row][column] = number_in_box\n column += 1\n row += 1\n else:\n print(\"You can only enter 9 numbers. Not letters. Not more. Not fewer. 9 numbers.\")", "def gen_questions(self, number_of_questions):", "def solution(self):\n return [(\"the\", 1579644)] * 100", "def __init__(self):\n self.nums = []\n self.mins = []", "def generate(self):\r\n # prepare data\r\n banknote_quantity_max = [int(math.floor(self.money / self.banknotes[i])) for i in range(0, self.n)]\r\n # model\r\n mdl = Model(name='MinSetGenerator')\r\n # decision variables\r\n mdl.banknote_quantity = {i: mdl.integer_var(lb=0, ub=banknote_quantity_max[i]) for i in range(0, self.n)}\r\n # decision expressions\r\n money_amount = mdl.sum(mdl.banknote_quantity[i] * self.banknotes[i] for i in range(0, self.n))\r\n notes_quantity = mdl.sum(mdl.banknote_quantity[i] for i in range(0, self.n))\r\n # constraints\r\n mdl.add_constraint(money_amount == self.money)\r\n # strategy\r\n mdl.minimize(notes_quantity)\r\n # solve model: return quantity of each banknotes and a set with a minimal number of banknotes\r\n if not mdl.solve():\r\n print('*** No solution!')\r\n return None, None\r\n else:\r\n return [int(mdl.banknote_quantity[i].solution_value) for i in range(0, self.n)], \\\r\n [self.banknotes[i] for i in range(0, self.n) if mdl.banknote_quantity[i].solution_value > 0]", "def return_questions_data():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n \n cursor.execute(\"select * from questions\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Answer']\n questions = {}\n for q,a in data:\n table.add_row([q,a])\n questions[q] = a\n conn.close()\n\n return table, questions", "def visualise_q_table(q_table):\n # extract best acts\n act_table = np.zeros((4, 4))\n str_table = []\n for row in range(4):\n str_table.append(\"\")\n for col in range(4):\n pos = row * 4 + col\n max_q = None\n max_a = None\n for a in range(4):\n q = q_table[(pos, a)]\n if max_q is None or q > max_q:\n max_q = q\n max_a = a\n act_table[row, col] = max_a\n str_table[row] += act_to_str(max_a)\n\n # print best actions in human_readable format\n print(\"\\nAction selection table:\")\n for row_str in str_table:\n print(row_str)\n print()", "def task104(self):\n self.ex(\"\"\"\nSELECT ones.num + tens.num + 1 AS num\nFROM (SELECT 1 num\n UNION\n SELECT 2 num\n UNION\n SELECT 3 num\n UNION\n SELECT 4 num\n UNION\n SELECT 5 num\n UNION\n SELECT 6 num\n UNION\n SELECT 7 num\n UNION\n SELECT 8 num\n UNION\n SELECT 9 num\n UNION\n SELECT 0 num\n ) ones CROSS JOIN (SELECT 10 num\n UNION\n SELECT 20 num\n UNION\n SELECT 30 num\n UNION\n SELECT 40 num\n UNION\n SELECT 50 num\n UNION\n SELECT 60 num\n UNION\n SELECT 70 num\n UNION\n SELECT 80 num\n UNION\n SELECT 90 num\n UNION\n SELECT 0 num\n ) tens\nORDER BY num;\"\"\")", "def initrows(self):\n #~ self.initrows2()\n self.rows=[]\n for yy in range(self.height):\n row=[]\n for xx in range(self.width):\n if (xx,yy) in self.allsqs:\n row.append(0)\n #~ elif p in self.gatesqs:\n #~ row.append(0)\n else:\n row.append(1)\n self.rows.append(row)", "def solution(self):\n return [((\"communes of the bouches-du-rh\\u00f4ne department\", \"france\"), 0.024170164983320565)] * 100", "def set_entries(row, col, mines):\n 
rows_amount.set(row)\n cols_amount.set(col)\n mine_amount.set(mines)", "def generate_ger2eng(self):\n question = []\n data_len = len(self.df)+1\n n = random.randint(0, data_len)\n lst = []\n options = []\n for i in range(3):\n no = random.randint(0, data_len)\n lst.append(no)\n lst.append(n)\n lst = random.sample(lst, len(lst))\n ### Creating the question\n question.append(f'Ein Englisches Wort für \"{self.df.iloc[n, 0]}\" auswählen:')\n ### Creating options/choices\n for l in lst:\n options.append(f'{self.df.iloc[l, 1]}')\n ### Allocating the answer\n answer = self.df.iloc[n, 1]\n\n return question, options, answer", "def convert_and_add_data_questions(\n self, data_questions, min_2b_tractable, max_2b_interesting):\n for data_q in data_questions.question_list:\n for value, penetrance in data_q.participant_values.items():\n if (penetrance >= min_2b_tractable\n and penetrance <= max_2b_interesting):\n q_items = {}\n q_items[QUESTION_ITEM] = (\n \"{0} {1}\".format(data_q.output_question, value))\n q_items[PENETRANCE_ITEM] = penetrance\n q_items[DIFFICULTY_ITEM] = 1.0 - penetrance\n q = Question(q_items)\n self.question_list.append(q)\n self.total_penetrance += penetrance\n self.question_count += 1\n\n self.in_decreasing_penetrance_order = sorted(\n self.question_list,\n key=lambda quest: quest.penetrance, reverse=True)\n self.in_increasing_difficulty_order = sorted(\n self.question_list,\n key=lambda quest: quest.difficulty)\n\n return None", "def print_mistakes_table():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n\n cursor.execute(\"select * from mistakes\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Given Answer','User Given Answer']\n for row in data:\n table.add_row(row)\n conn.close()\n\n return table", "def generate_eng2ger(self):\n question = []\n data_len = len(self.df)+1\n n = random.randint(0, data_len)\n lst = []\n options = []\n for i in range(3):\n no = random.randint(0, data_len)\n lst.append(no)\n lst.append(n)\n lst = random.sample(lst, len(lst))\n ### Creating the question\n question.append(f'Select a german word for \"{self.df.iloc[n, 1]}\":')\n ### Creating options/choices\n for l in lst:\n options.append(f'{self.df.iloc[l, 0]}')\n ### Allocating the answer\n answer = self.df.iloc[n, 0]\n\n return question, options, answer", "def print_results(identity_list):\n inversed_indexes = {v: k for k, v in INDEXES.items()}\n highest_val = identity_list[0][0]\n highest_hand = \"A A\"\n lowest_val = highest_val\n lowest_hand = \"A A\"\n running_total = 0.0\n\n print(f\"This table contains win percentages from comparing {HANDS} hands\")\n print(f\"against each other in {SIMULATIONS} simulations\\n\")\n print(\" A K Q J T 9 8 7 6 5 4 3 2\\n\")\n for row in range(len(INDEXES)):\n print(f\"{inversed_indexes[row]} \", end=\"\")\n for col in range(len(INDEXES)):\n print(f\"{format(identity_list[row][col], '.2f')}\", end=\" \") # To two decimal places\n\n # Update highest/lowest values\n if identity_list[row][col] > highest_val:\n highest_val = identity_list[row][col]\n highest_hand = f\"{inversed_indexes[row]} {inversed_indexes[col]}\"\n if row != col:\n suited = True if col > row else False\n highest_hand += ' suited' if suited else ' off'\n\n if identity_list[row][col] < lowest_val:\n lowest_val = identity_list[row][col]\n lowest_hand = f\"{inversed_indexes[row]} {inversed_indexes[col]}\"\n if row != col:\n suited = True if col > row else False\n lowest_hand += ' suited' 
if suited else ' off'\n\n # Update running total\n running_total += identity_list[row][col]\n\n print(\"\\n\")\n\n print(f\"The hand with the highest win percentage was {highest_hand} \", end=\"\")\n print(f\"with {format(highest_val, '.2f')}% of hands won\")\n print(f\"The hand with the lowest win percentage was {lowest_hand} \", end=\"\")\n print(f\"with {format(lowest_val, '.2f')}% of hands won\")\n print(f\"The average win percentage overall was \", end=\"\")\n print(f\"{format(running_total / len(INDEXES) ** 2, '.2f')}%\")", "def enumerate_test_metric(\n self, qset: Iterator[Tuple[str, float]]\n ) -> Iterator[Tuple[CompletionElement, CompletionElement]]:\n qset = sorted(qset)\n current = 0\n for query, weight in qset:\n while current < len(self) and self[current].value <= query:\n current += 1\n ind = current - 1\n el = CompletionElement(query, weight)\n if ind >= 0:\n inset = self[ind]\n le = len(inset.value)\n if le <= len(query) and inset.value == query[:le]:\n if le == len(query):\n found = inset\n el.mks0 = inset.mks0\n el.mks1 = inset.mks1\n el.mks2 = inset.mks2\n el.mks0_ = len(query)\n el.mks1_ = len(query)\n el.mks2_ = len(query)\n else:\n found = None\n el.mks0 = 0\n el.mks0_ = 0\n el.mks1 = inset.mks1 + len(query) - le\n el.mks1_ = le\n el.mks2 = inset.mks2 + len(query) - le\n el.mks2_ = le\n else:\n found = None\n el.mks0 = len(query)\n el.mks1 = len(query)\n el.mks2 = len(query)\n el.mks0_ = len(query)\n el.mks1_ = len(query)\n el.mks2_ = len(query)\n else:\n found = None\n el.mks0 = len(query)\n el.mks1 = len(query)\n el.mks2 = len(query)\n el.mks0_ = len(query)\n el.mks1_ = len(query)\n el.mks2_ = len(query)\n\n yield el, found", "def get_answers(self):\r\n anshtml = '<span class=\"openended-answer\"><pre><code>{0}</code></pre></span>'.format(self.answer)\r\n return {self.answer_id: anshtml}", "def get_score_matrix(self) -> int:", "def __init__(self, question_list):\n self.question_list = question_list\n self.question_number = 0\n self.score = 0", "def score_quiz(self, cleaned_data):\n \"\"\" question_list is 0 based, the form questions are 1-based \"\"\"\n\n print('Quiz.score_quiz - cleaned_data:', cleaned_data)\n score = Score()\n\n for form_question_str in sorted(cleaned_data):\n form_question_int = int(form_question_str.replace(\"question_\", \"\"))\n list_question_int = int(form_question_int) - 1\n answer_123_type = self.get_answer_123_type(list_question_int)\n answer_selected_str = cleaned_data[form_question_str]\n answer_selected_int = int(answer_selected_str)\n answer_text = self.get_answer_text(list_question_int, answer_selected_str)\n answer_weight_str = self.get_answer_weight(list_question_int, answer_selected_str)\n answer_weight_int = int(answer_weight_str)\n\n # print('Quiz.score_quiz - form_question_str:', str(form_question_str))\n # print('Quiz.score_quiz - form_question_int:', str(form_question_int))\n # print('Quiz.score_quiz - list_question_int:', str(list_question_int))\n # print('Quiz.score_quiz - answer_123_type:', answer_123_type)\n # print('Quiz.score_quiz - answer_selected_str:', answer_selected_str)\n # print('Quiz.score_quiz - answer_selected_int:', answer_selected_int)\n # print('Quiz.score_quiz - answer_text:', answer_text)\n # print('Quiz.score_quiz - answer_weight_str:', answer_weight_str)\n # print('Quiz.score_quiz - answer_weight_int:', answer_weight_int)\n # print('Quiz.score_quiz - score:', score)\n\n score.tally_answer(answer_123_type, answer_selected_int, answer_weight_int)\n\n return score", "def __init__(self):\r\n self.rows = 
[[0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9]\r\n self.block1 = []\r\n self.block5 = []\r\n self.block9 = []\r\n self.puzzle = []\r\n self.score = 0\r\n self.difficulty = 1 # By default Easy difficulty\r\n\r\n \"\"\" Creating blocks using random number generator\"\"\"\r\n while len(self.block1) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block1:\r\n self.block1.append(r)\r\n\r\n while len(self.block5) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block5:\r\n self.block5.append(r)\r\n\r\n while len(self.block9) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block9:\r\n self.block9.append(r)\r\n x = 0\r\n for i in range(3):\r\n for j in range(3):\r\n self.rows[i][j] = self.block1[x]\r\n x = x+1\r\n x = 0\r\n for i in range(3, 6):\r\n for j in range(3, 6):\r\n self.rows[i][j] = self.block5[x]\r\n x = x+1\r\n x = 0\r\n for i in range(6,9):\r\n for j in range(6,9):\r\n self.rows[i][j] = self.block9[x]\r\n x = x+1\r\n \"\"\"Creating a valid solution\"\"\"\r\n self.createsolution(self.rows)", "def show_answer(self,values):\r\n values_converted = []\r\n for entry in values:\r\n new = int(entry.get())\r\n # print(new)\r\n values_converted.append(new)\r\n print(values_converted)\r\n antwoord = self.root.huidig_model.model.predict([values_converted])\r\n # for i in values_converted:\r\n # print(i)\r\n # print(antwoord)\r\n return messagebox.showinfo('Voorspelling','Voorspelling op basis van ingevoerde waarde(s) : {}'.format(antwoord))", "def create_primes_table(self, n):\n\n if n == 0:\n print('You\\'ve selected 0 primes. Here is a nonexistent table.')\n return '0 primes'\n if not isinstance(n, int) or n < 0:\n raise ValueError('Sorry, that\\'s not a valid number of primes. Please try again with an integer greater than 0.')\n\n n_primes = get_n_primes(n)\n self.table.append(n_primes)\n\n for i in range(1, len(n_primes)):\n row = []\n row.append(n_primes[i])\n for j in range(1, len(n_primes)):\n row.append(n_primes[i] * n_primes[j])\n self.table.append(row)", "def __init__(self):\n self.maxq = []\n self.minq = []", "def add_numbers():\n\n for fraction, fraction_spelling in [(0.25, 'quarter'), (0.5, 'half')]:\n add_db_number(fraction, fraction_spelling)\n\n for cardinal in xrange(60):\n add_db_number(cardinal, spell_number(cardinal))\n\n for single_digit in xrange(9):\n add_db_number(single_digit, \"zero \" + spell_number(single_digit))\n add_db_number(single_digit, \"o \" + spell_number(single_digit))", "def start_game(answer, session):\n\n print(\"start_game, answer: \", answer)\n\n attributes = reset_attributes()\n\n if answer == \"einem spieler\":\n answer = \"1\"\n if answer == \"vier spieler\":\n answer = \"4\"\n\n if answer in [str(x) for x in range(1, 5)]:\n curr_round = 1\n curr_player = 1\n state = \"Gameon\"\n scores = {x:0 for x in range(1, int(answer)+1)}\n sess_fragen = populate_questions(scores)\n \n attributes[\"question_index\"] = 0\n attributes[\"current_round\"] = curr_round\n attributes[\"current_player\"] = curr_player\n attributes[\"state\"] = state\n attributes[\"scores\"] = scores\n attributes[\"sess_questions\"] = sess_fragen\n\n if answer == \"1\":\n text = \"<s>Alles klar. 
\"+ TEXT_BREAK + \"Wir beginnen ein Spiel mit einem Spieler.\"+\\\n \"</s> <s>Das Quiz enthält {} Fragen.\\\n </s>\".format(TOTAL_ROUNDS)\n else:\n text = \"<s>Alles klar.\" + TEXT_BREAK + \"Wir beginnen ein Spiel mit {} Spielern\"\\\n .format(answer) +\\\n \"</s><s> Es werden jeweils {} Fragen an jeden Spieler gestellt.\\\n </s>\".format(TOTAL_ROUNDS)\n\n frage1 = ask_question(0, attributes)\n text += TICK_HELP_MESSAGE\n text += frage1\n card_text = \"Spiel mit {0} Spielern begonnen.\\n\".format(len(scores)) + clear_tags(frage1)\n\n else:\n richtige_zahl_prompt = \"Sag eine Nummer zwischen 1 und 4.\"\n text = \"Ungültige Spielerzahl. \" + richtige_zahl_prompt\n frage1 = SPIELER_PROMPT_TEXT\n card_text = text\n\n attributes[\"current_question\"] = frage1\n attributes[\"speech_output\"] = text\n attributes[\"reprompt_text\"] = frage1\n \n return response(text, should_end_session=False, reprompt_text=frage1, \\\n attributes=attributes, card_text=card_text)", "def set_qs():\n\n print \"Hi there! We're going to give you a fun grammar quiz.\"\n\n user_name = raw_input(\"To start, please enter your name: \")\n\n print \"Thanks, {}!\".format(user_name)\n\n user_num = int(raw_input(\"How many questions would you like us to generate for you? Enter a number: \"))\n\n num_qs = validate_num(user_num)\n\n print \"Ok, we'll make you a quiz with {} questions!\".format(num_qs)\n\n return num_qs", "def tab_Pdk(dmax):\r\n kmax = dmax*6 #la somme des des ne peut etre superieur a 6 fois leur nombre\r\n res = np.ones((dmax, kmax))\r\n\r\n\t#on met a zero toutes les cases qui sont impossible a completer\r\n for d in range(dmax):\r\n for k in range(kmax):\r\n if (k+1)<2*(1+d) or (k+1)>6*(d+1):\r\n res[d,k] = 0\r\n\t\t \r\n\t#on initialise pour le cas d=1\r\n for i in range(1,6):\r\n res[0][i] = 1/5\r\n\r\n\t#on met les valeurs des Q(d,k) dans toutes les cases non nulles\r\n for d in range(1,dmax):\r\n for k in range(kmax):\r\n if (res[d,k]==1) :\r\n res[d,k] = 0\r\n #on fait un for dans les valeurs qui sont realisables. 
\r\n #le +1 apres le min est la car nous sommes dans un range\r\n for i in range(max(k-6,2*(d+1-1)-1) , min(k-2,6*(d+1-1))+1):\r\n res[d,k] += res[d-1,i]/5\r\n\r\n\t#On multiplie toutes les cases selon la formule pour obtenir les P(d,k)\r\n for d in range(dmax):\r\n for k in range(kmax):\r\n res[d,k] = res[d,k]*(5/6)**(d+1)\r\n\t\t \r\n for d in range(dmax):\r\n res[d, 0] = 1-(5/6)**(d+1)\r\n\t\t\r\n return res", "def resultat_match(self, binomes):\n for binome in binomes:\n while True:\n score_un = self.vue.entree_resultats(binome[0])\n score_deux = self.vue.entree_resultats(binome[1])\n if score_un + score_deux != 1:\n self.vue.erreur_score()\n continue\n else:\n binome[0].ajout_score(score_un)\n binome[1].ajout_score(score_deux)\n table_players.update({\"score\": binome[0].points},\n doc_ids=[binome[0].id])\n table_players.update({\"score\": binome[1].points},\n doc_ids=[binome[1].id])\n break\n self.vue.afficher_resultats(binomes)", "def pretty_sum(digitTuples, arithmaticOperator, dttm, sumsInARow=5, writeAnswer=False):\n\t\n\t# If Answers are to be printed (for parents / teachers), append ans to the filename.\n\tif writeAnswer:\n\t\tfout = open('math_ans_'+dttm+'.html', 'w')\n\t# Otherwise for kids, append que to filename.\n\telse:\n\t\tfout = open('math_que_'+dttm+'.html', 'w')\n\t# Generic HTML and CSS starting.\n\tfout.write('<!DOCTYPE html>\\n<html lang=\"en\">\\n<head>\\n<meta charset=\"utf-8\"/>\\n<title>\\nMath sum generator</title>\\n<style>\\ntable, td { border: 2px solid black; padding: 10px; font-size: 35px;}\\n td { text-align: right; }\\n</style>\\n</head>\\n<body>\\n<table>\\n')\n\t# Initialize counter\n\tcounter = 0\n\t# a and b are the numbers\n\tfor (a, b) in digit_tuples:\n\t\t# At every sumsInARow, insert a new table row.\n\t\tif counter % sumsInARow == 0:\n\t\t\tstartCounter = counter\n\t\t\tfout.write('<tr>\\n')\n\t\t# Generate Sum.\n\t\tsumm = '<td>'\n\t\tsumm += '<br>' + a + '<br/>'\n\t\tsumm += arithmaticOperator + ' ' + b + '<br/>'\n\t\tsumm += '----------' + '<br/>'\n\t\t# If answer is to be written, do sum actually and print.\n\t\tif writeAnswer:\n\t\t\tif arithmaticOperator == '+':\n\t\t\t\tsumm += str(int(a) + int(b)) + '<br/>'\n\t\t\telif arithmaticOperator == '-':\n\t\t\t\tsumm += str(int(a) - int(b)) + '<br/>'\n\t\t\telif arithmaticOperator in ['x', 'X']:\n\t\t\t\tsumm += str(int(a) * int(b)) + '<br/>'\n\t\t# Else leave a blank line.\n\t\telse:\n\t\t\tsumm += '<br/>'\n\t\tif arithmaticOperator in ['x', 'X']:\n\t\t\tsumm += '<br/>' * (len(b)-1)\n\t\t# Close the td.\n\t\tsumm += '</td>'\t\n\t\t# Write summ to the file.\n\t\tfout.write(summ)\n\t\t# Close the row at specified interval i.e. 
<sumsInARow>.\n\t\tif counter - startCounter == sumsInARow - 1:\n\t\t\tfout.write('\\n</tr>\\n')\n\t\t# Increment counter.\n\t\tcounter += 1\n\t# Close HTML.\n\tfout.write('</table>\\n</body>\\n</html>')", "def calc():\n randnum1 = randint(1, 100)\n randnum2 = randint(1, 100)\n operator = choice('+-*')\n question = '{0} {1} {2}'.format(randnum1, operator, randnum2)\n if operator == '+':\n answer = randnum1 + randnum2\n elif operator == '-':\n answer = randnum1 - randnum2\n elif operator == '*':\n answer = randnum1 * randnum2\n return answer, question", "def initial_Q(self, negative):\n \n ##get each values in the Q, and change their content to given number, plan to use in Q5\n for key in self.Q.iterkeys():\n self.Q[key] = float(negative)", "def pick_questions(standard_info):\n\n questions = []\n\n # goes through each item in the standard into\n for item in standard_info:\n standard_id = item[0]\n count = item[1]\n options = standards[standard_id][\"Questions\"]\n # if the questions you have allocated to that standard are less than the amount\n # of questions that standard technically has to offer, just add the amount you are able to\n if count < len(options):\n available = options[:count]\n questions.extend(available)\n # alternatively, if the number you have allocated to that standard is exactly equal to the amount\n # of q's that standard has, just \"divide evenly\" and put all the questions in once\n elif count == len(options):\n questions.extend(options)\n # if you have more questions allocated to that standard than you have questions available, duplicate!\n elif len(options) < count:\n if not count % len(options):\n to_add = options * (count / len(options))\n questions.extend(to_add)\n else:\n to_add = options * (count / len(options))\n questions.extend(to_add)\n remainder = count % len(options)\n winners = defaultdict(int)\n while remainder:\n choices = options[:]\n winner = random.choice(choices)\n choices.pop(choices.index(winner))\n winners[winner] += 1\n remainder -= 1\n for q in options:\n print q\n for i in range(1, winners[q] + 1):\n questions.append(q)\n print questions\n\n return questions", "def get_score(self, student_answers):\r\n pass", "def populate_questions(scores):\n \n print(\"populate_questions, scores: \", str(scores))\n\n try:\n return random.sample(range(len(quiz.list_fragen)), TOTAL_ROUNDS*len(scores))\n except ValueError:\n print(\"List of questions is too short.\")", "def calc_assn_weights():\n\t\n\t\t\t#\n\t\t\t#\n\ttext(\"\"\"INSERT INTO assignments (mentor_id, course_id, cost)\n\t\t\tSELECT M.mentor_id, C.course_id, SUM(COALESCE(PW.weight_value,PT.def_weight_val))\n\t\t\tFROM mentors M, courses C\n\t\t\tJOIN course2pref C2P ON C2P.course_id = C.course_id\n\t\t\tJOIN prefs P ON P.pref_id = C2P.pref_id\n\t\t\tJOIN pref_types PT ON PT.pref_type_id = P.pref_type_id\n\t\t\tJOIN pref_weights PW ON PW.pref_type_id = P.pref_type_id\n\t\t\tLEFT JOIN choices Ch ON Ch.mentor_id = M.mentor_id AND Ch.weight_id = PW.pref_id", "def quick_quiz(character_set):", "def _fcn_hypno_to_score(self):\n self._hypno = self._hyp.gui_to_hyp()\n # Avoid updating data while setting cell :\n self._scoreSet = False\n items = ['Wake', 'N1', 'N2', 'N3', 'REM', 'Art']\n # Remove every info in the table :\n self._scoreTable.setRowCount(0)\n # Find unit conversion :\n fact = self._get_fact_from_unit()\n # Find transients :\n _, idx, stages = transient(self._hypno, self._time / fact)\n idx = np.round(10. 
* idx) / 10.\n # Set length of the table :\n self._scoreTable.setRowCount(len(stages))\n # Fill the table :\n for k in range(len(stages)):\n # Add stage start / end :\n self._scoreTable.setItem(k, 0, QtWidgets.QTableWidgetItem(\n str(idx[k, 0])))\n self._scoreTable.setItem(k, 1, QtWidgets.QTableWidgetItem(\n str(idx[k, 1])))\n # Add stage :\n self._scoreTable.setItem(k, 2, QtWidgets.QTableWidgetItem(\n items[stages[k]]))\n self._scoreSet = True", "def init_round_results(self):\r\n for player in range(0, 4):\r\n for round_num in range(0, 17):\r\n spin_bid = tk.Spinbox(self.master, from_=-1, to=min(round_num+1, 13), width=10)\r\n spin_bid.grid(row=round_num+2, column=player*3+1, padx=2)\r\n self.spin_bids[player].append(spin_bid)\r\n spin_trick = tk.Spinbox(self.master, from_=-1, to=min(round_num+1, 13), width=10)\r\n spin_trick.grid(row=round_num+2, column=player*3+2, padx=2)\r\n self.spin_tricks[player].append(spin_trick)\r\n lbl_score = tk.Label(self.master, text=\"0\", font='courier 10 bold', fg='green',\r\n width=10, borderwidth=2, relief=\"groove\", anchor=\"e\")\r\n if round_num % 4 == player: # mark starting player in each round\r\n spin_bid.configure(bg='LightSteelBlue2')\r\n spin_trick.configure(bg='LightSteelBlue2')\r\n lbl_score.configure(bg='LightSteelBlue2')\r\n lbl_score.grid(row=round_num+2, column=player*3+3, sticky=tk.W+tk.E, padx=2)\r\n self.lbl_scores[player].append(lbl_score)", "def show_answers(queries, answers, aggregation_predictions_string):\n\n ans_list = []\n for query, answer, predicted_agg in zip(queries, answers, aggregation_predictions_string):\n print(query)\n print(answer,type(answer))\n print(predicted_agg)\n answer = [i.strip() for i in answer.split(',')]\n print(answer)\n if (len(answer) == 1):\n if (predicted_agg == 'COUNT'):\n answer = len([i for i in answer])\n\n if (len(answer) > 1):\n if (predicted_agg == 'SUM'):\n try:\n answer = sum([float(i) for i in answer])\n except ValueError:\n answer = predicted_agg\n elif (predicted_agg == 'COUNT'):\n answer = len([i for i in answer])\n elif (predicted_agg == 'AVERAGE'):\n answer = sum([float(i) for i in answer]) / len([i for i in answer])\n elif (predicted_agg == 'NONE'):\n answer = answer\n else:\n answer = 'None'\n # if predicted_agg == \"NONE\":\n # print(\"Predicted answer: \" + answer)\n # else:\n # print(\"Predicted answer: \" + predicted_agg + \" > \" + answer)\n\n ans_list.append(answer)\n\n return ans_list", "def test_10():\n table = pandas.read_csv('data/student_score_updated_to_have_negative_marks.csv')\n query_result = show.show(table,\n metric='marks' ,\n dimensions=['subject'] ,\n summary_operator=SummaryOperators.PROPORTION_OF_SUM)\n print(query_result)\n expected_result = \"\"\" subject PROPORTION_OF_SUM of marks\n0 Social science 0.399558\n1 english 0.000000\n2 maths 0.200883\n3 science 0.399558\"\"\"\n\n expected_suggestions = \"[{'suggestion': 'There exists negative values among the values on which proportion is being applied', 'oversight': <Oversights.ATTRIBUTION_WITH_HIDDEN_NEGATIVES: 11>, 'is_row_level_suggestion': True, 'confidence_score': 1, 'row_list': [{'row': 2, 'confidence_score': 1}, {'row': 3, 'confidence_score': 1}]}]\"\n\n assert(expected_result == query_result[0].to_string())\n assert(expected_suggestions == str(query_result[1]))", "def test_answers(self):\n row = self.dataset.iloc[87]\n self.assertEqual(\"When was the trial due to start ?\", row.question)\n self.assertEqual([\"Wednesday\"], _get_answers(row))\n\n row = self.dataset.iloc[31221]\n self.assertEqual(\"Whose 
rights have not improved under the Taliban ?\", row.question)\n self.assertEqual([\"Conditions for women\"], _get_answers(row))\n\n row = self.dataset.iloc[45648]\n self.assertEqual(\"What does Vertu make ?\", row.question)\n self.assertEqual([\"phones starting at $ 6,000\"], _get_answers(row))", "def on_lineEdit_u_moyens_mesure_textChanged(self, p0):\n u_moyens = np.sqrt(float(self.lineEdit_u_moyens_mesure.text()))\n self.lineEdit_u_moyens.setText(str(u_moyens))\n \n for ligne in range(9):\n \n item = QtGui.QTableWidgetItem(str(u_moyens))\n self.tableWidget_mesures.setItem(ligne, 6, item)", "def makeEvalTable(table):\n\n def score(game, player):\n row, col = game.get_player_location(player)\n return table[row][col]\n\n return score", "def set_up_matrix():\n matrix= []\n row= \"1 9 3 4 5\"\n row= to_int(row)\n matrix.append(row)\n row= \"2 30 4 5 6\"\n row= to_int(row)\n matrix.append(row)\n row= \"3 8 5 6 7\"\n row= to_int(row)\n matrix.append(row)\n row= \"4 5 6 7 8\"\n row= to_int(row)\n matrix.append(row)\n row= \"5 6 7 8 9\"\n row= to_int(row)\n matrix.append(row)\n return matrix", "def print_top_answers(answers):\n print(\"Possible answers:\")\n print(\"-\" * 40)\n for res in answers:\n print(unicode(u\"{0:.2f}\\t{1}\".format(res[1], res[0])))", "def _populate_output(self):\n self._store_query_percentiles_table()", "def generate_answer(obj):\n # Initialize variables\n answer = None\n answer_table = None\n answer_q = None\n answer_r = None\n answer_a = None\n answer_b = None\n answer_d = None\n\n # Get object values\n op = obj.get('operation')\n m = obj.get('mod')\n f = obj.get('f')\n g = obj.get('g')\n h = obj.get('h')\n a = obj.get('a')\n b = obj.get('b')\n deg = obj.get('deg')\n additional_data = obj.get('additional_data')\n poly_mod = obj.get('operation_values')\n\n # Operation switch\n if op == \"display-poly\":\n answer = f # Luke\n elif op == \"add-poly\":\n answer = add_poly(f, g, m) # Luke\n elif op == \"subtract-poly\":\n answer = subtract_poly(f, g, m) # Luke\n elif op == \"multiply-poly\":\n answer = mult(f, g, m) # Janneke\n elif op == \"long-div-poly\":\n answer_q, answer_r = long_div_poly(f, g, m) # Pol\n elif op == \"euclid-poly\":\n answer_a, answer_b, answer_d = euclid_extended_poly(f, g, m) # Pol\n elif op == \"equals-poly-mod\":\n answer = equals_poly_mod(f, g, h, m) # Janneke\n elif op == \"irreducible\":\n answer = is_irreducible(f, m) # Edwin\n elif op == \"find-irred\":\n answer = find_irred(deg, m) # Edwin\n elif op == \"mod-poly\":\n if additional_data == 'add-table': # Edwin\n answer_table = add_table_field(m, poly_mod)\n elif additional_data == 'mult-table': # Edwin\n answer_table = multiply_table_field(m, poly_mod)\n elif additional_data == 'display-field': # Luke\n answer = display_field(a, m, poly_mod)\n elif additional_data == 'add-field': # Janneke\n answer = add_field(poly_mod, m, a, b)\n elif additional_data == 'subtract-field': # Janneke\n answer = subtract_field(poly_mod, m, a, b)\n elif additional_data == 'multiply-field': # Janneke\n answer = multiply_field(poly_mod, m, a, b)\n elif additional_data == 'inverse-field': # Luke\n answer = inverse_field(a, m, poly_mod)\n elif additional_data == 'division-field': # Pol\n answer = division_field(a, b, m, poly_mod)\n elif additional_data == 'equals-field': # Luke\n answer = equals_field(a, b, m, poly_mod)\n elif additional_data == 'primitive': # Pol# Different Answer\n answer = is_primitive(a, m, poly_mod)\n elif additional_data == 'find-prim': # Pol\n answer = find_primitive(m, poly_mod)\n else:\n 
answer = 'Operation not Supported.'\n else:\n answer = 'Operation not Supported.'\n\n # Parse result to a valid polynomial\n if answer:\n obj['answer'] = display_poly(answer, m)\n if answer_table:\n obj['answer'] = display_table(answer_table, m)\n if answer_q:\n obj['answer-q'] = display_poly(answer_q, m)\n if answer_r:\n obj['answer-r'] = display_poly(answer_r, m)\n if answer_a:\n obj['answer-a'] = display_poly(answer_a, m)\n if answer_b:\n obj['answer-b'] = display_poly(answer_b, m)\n if answer_d:\n obj['answer-d'] = display_poly(answer_d, m)\n\n return obj", "def show_answer_board(self, coords):\n Minesweeper.print_table(self.final_table, coords)", "def difficulty_choose(difficulty):\n\n question = []\n answer = []\n for i in range(quiz_num):\n if sheet1_data.row_values(i+1)[0].lower() == difficulty.lower():\n # print sheet1_data.row_values(i+1)[1]\n question.append(sheet1_data.row_values(i+1)[1])\n answer.append(sheet1_data.row_values(i+1)[2])\n return question, answer", "def main():\n\n precomp = {}\n for op1 in '+-*/':\n for op3 in '+-*/':\n for op5 in '+-*/':\n text = '4 ' + ' 4 '.join([op1, op3, op5]) + ' 4'\n precomp[eval2(text)] = text\n\n for _ in range(int(input())):\n number = int(input())\n if number in precomp:\n print(precomp[number], '=', number)\n else:\n print('no solution')", "def prompt_table(prompt, table):\n while True:\n print(prompt)\n for i in range(0, len(table)):\n row_format = \"{:>15}\" * (len(table[i]) + 1)\n print(f\"{i})\\t\" + row_format.format(\"\", *table[i]))\n response = prompt_base(\"\")\n try:\n response = int(response)\n if 0 <= response < len(table):\n return table[response]\n except:\n pass", "def insert_qst_ans(self, question, answer, level, multi=False):\r\n assert type(answer) == list or isinstance(answer, Iterable),\\\r\n \"Answer must be string or Iterable object.\"\r\n\r\n assert type(question) == list or isinstance(question, Iterable), \\\r\n \"Question must be string or Iterable object.\"\r\n\r\n assert 1 <= level <= 3, \"Unknown level\"\r\n\r\n if level == 3:\r\n table_name = DBClass.table_name_qst_level3\r\n else:\r\n table_name = DBClass.table_name_qst_levels_12\r\n\r\n command = \"INSERT INTO %s \" % table_name\r\n\r\n if not multi:\r\n command += \"(question, answer) VALUES ('%s', '%s');\" % (question,\r\n answer)\r\n else:\r\n command += \"(question, answer) VALUES\"\r\n for qst, ans in zip(question, answer):\r\n command += f\"\\n('{qst}', '{ans}'),\"\r\n command = command[:-1] + \";\"\r\n\r\n print(command)\r\n try:\r\n print(command)\r\n self.cursor.execute(command)\r\n except sq.Error as e:\r\n raise e\r\n self.conn.commit()", "def eval_exp_table(self):\n\n maximum = max(self.exp_states, key=self.exp_states.get)\n minimum = min(self.exp_states, key=self.exp_states.get)\n print(maximum, self.exp_states[maximum])\n print(minimum, self.exp_states[minimum])", "def get_standardized_champion_data():\n conn = get_connect()\n columns = [\"kills\", \"deaths\", \"assists\", \"totalDamage\", \"magicDamage\",\n \"physicalDamage\", \"trueDamage\", \"totalHeal\", \"totalDamageTaken\"]\n max_list = [0]\n min_list = [0]\n for column in columns:\n cursor = conn.execute(\"SELECT MAX(\" + column + \") FROM championData\")\n max_list.append(cursor.fetchone()[0])\n cursor = conn.execute(\"SELECT MIN(\" + column + \") FROM championData\")\n min_list.append(cursor.fetchone()[0])\n #print(max_list)\n #print(min_list)\n\n standardized_champion_data = []\n cursor = conn.execute(\"SELECT * FROM championData ORDER BY championId\")\n for row in cursor:\n 
single_champion_data = []\n for i in range(1,10):\n single_champion_data.append( (row[i]-min_list[i]) / (max_list[i]-min_list[i]) )\n standardized_champion_data.append(single_champion_data)\n\n conn.close()\n return standardized_champion_data", "def answers_db() -> Dict[str, List]:\n return{\"lawyer\":[\"either\",\"other\",\"law\",\"boy\"],\n \"cot_caught\":[\"different\",\"other\",\"same\"],\n \"second_person_plural\":[\"other\",\"y'all\",\"yins\",\n \"you\",\"you'uns\",\"you all\",\"you guys\",\"you lot\",\n \"yous, youse\"],\n \"yard_sale\":[\"car boot\",\"car boot sale\",\n \"carport sale\",\"garage sale\",\"jumble (sale)\",\n \"other\",\"patio sale\",\"rummage sale\",\"sidewalk sale\",\n \"stoop sale\",\"tag sale\",\"thrift sale\",\"yard sale\"],\n \"verge\":[\"beltway\",\"berm\",\"curb strip\",\n \"I have no word for this\",\"other\",\"parking\",\n \"terrace\",\"tree lawn\",\"verge\"],\n \"sandwich\":[\"baguette\",\"bomber\",\"grinder\",\"hero\",\n \"hoagie\",\"I have no word for this\",\"Italian sandwich\",\n \"other\",\"poor boy\",\"sarney\",\"sub\"],\n \"firefly\":[\"firefly\",\"I have no word for this\",\n \"I use lightning bug and firefly interchangeably\",\n \"lightning bug\",\"other\",\"peenie wallie\"],\n \"crawfish\":[\"craw\",\"crawdad\",\"crawfish\",\"crayfish\",\n \"crowfish\",\"I have no word for this critter\",\"mudbug\",\"other\"],\n \"shoes\":[\"gymshoes\",\"I have no general word for this\",\n \"jumpers\",\"other\",\"runners\",\"running shoes\",\"sand shoes\",\n \"shoes\",\"sneakers\",\"tennis shoes\",\"trainers\"],\n \"bug\":[\"basketball bug\",\"centipede\",\"doodle bug\",\n \"I have no idea what this creature is\",\n \"I know what this creature is, but have no word for it\",\n \"millipede\",\"other\",\"pill bug\",\"potato bug\",\"roll-up bug\",\n \"roly poly\",\"sow bug\",\"twiddle bug\",\"wood louse\"],\n \"kitty_corner\":[\"catercorner\",\"catty-corner\",\n \"I can only use \\\"diagonal\\\" for this\",\"I have no term for this\",\n \"kitacorner\",\"kitty-corner\",\"kitty cross\",\"kitty wampus\",\"other\"],\n \"highway\":[\"a freeway has limited access (no stop lights, no intersections), whereas a highway can have stop lights and intersections\",\n \"a freeway is bigger than a highway\",\n \"a freeway is free (i.e., doesn't charge tolls); a highway isn't\",\n \"expressway\",\"freeway\",\"highway\",\"other\",\"parkway\",\n \"throughway/thru-way\",\"turnpike\"],\n \"rain_sun\":[\"fox's wedding\",\"I have no term or expression for this\",\n \"liquid sun\",\"monkey's wedding\",\"other\",\"pineapple rain\",\"sunshower\",\n \"the devil is beating his wife\",\"the wolf is giving birth\"],\n \"frosting\":[\"both\",\"frosting\",\"icing\",\n \"icing is thinner than frosting, white, and/or made of powdered sugar and milk or lemon juice\",\n \"neither\",\"other\"],\n \"side_road\":[\"access road\",\"feeder road\",\"frontage road\",\n \"gateway\",\"I've never heard of this concept\",\"other\",\n \"service road\",\"we have them but I have no word for them\"],\n \"water_fountain\":[\"bubbler\",\"drinking fountain\",\"other\",\"water bubbler\",\n \"water fountain\"],\n \"beverage\":[\"cocola\",\"coke\",\"dope\",\"fizzy drink\",\n \"lemonade\",\"other\",\"pop\",\"soda\",\"soft drink\",\"tonic\"],\n \"rubbernecking\":[\"curiosity delay\",\"gapers' block\",\n \"gapers' delay\",\"gawk block\",\"I have no word for this\",\n \"Lookie Lou\",\"other\",\"rubberneck\",\"rubbernecking\",\n \"rubbernecking is the thing you do, not the traffice jam\"],\n \"halloween\":[\"cabbage 
night\",\"devil's eve\",\"devil's night\",\n \"gate night\",\"goosy night\",\"I have no word for this\",\n \"mischief night\",\"other\",\"trick night\"],\n \"brew_thru\":[\"beer barn\",\"beverage barn\",\"bootlegger\",\"brew thru\",\n \"I have never heard of such a thing\",\"other\",\"party barn\",\n \"we have these in my area, but we have no special term for them\"]}", "def administer(self):\n \n # create a dictionary that will count True and False answers\n score = {True: 0, False: 0}\n\n # iterate through each question in the list of questions\n # keep track of user's score. The question and answer are stored as\n # a list, so convert back into Question class first to use\n # ask_and_evaluate\n\n # for test questions in order:\n\n # for i in range(len(self.questions)):\n # question = Question(self.questions[i][0], self.questions[i][1])\n # score_question = question.ask_and_evaluate()\n # score[score_question] = score.get(score_question, 0) + 1\n\n\n # for random order test questions:\n list_of_questions = self.questions\n\n from random import choice\n \n for i in range(len(list_of_questions)):\n # choose a question randomly:\n question_choice = choice(list_of_questions)\n # delete that from the list of questions so it's not chosen again\n list_of_questions.remove(question_choice)\n # create a Question object from the question and answer\n question = Question(question_choice[0], question_choice[1])\n # ask and evaluate the question\n score_question = question.ask_and_evaluate()\n # record the score\n score[score_question] = score.get(score_question, 0) + 1\n\n\n # print the total number of correct and incorrect responses\n print \"Total correct: {}. Total incorrect: {}\".format(score[True], \n score[False])\n\n # return the number of incorrect and correct responses as a dictionary\n return score", "def question_finder(self):\r\n with open('Questions.csv', mode='r') as csv_file:\r\n csv_reader = csv.DictReader(csv_file)\r\n for row in csv_reader:\r\n\r\n my_tuple = finding_best_match(row, \"Questions\", self.question.split(), 2)\r\n self.percentage_list.append(my_tuple)\r\n\r\n # Checks if a whole sentence is in the asked question\r\n if question_match(join_string(self.question.split()), join_string(row[\"Questions\"].split())):\r\n self.question_match_list.append(my_tuple)\r\n\r\n \"\"\"\r\n Gives user the most appropriate answer depending on the user's question\r\n And depending on the user's emotion\r\n \"\"\"\r\n # Find Random Index (This will later be changed)\r\n random_index = random.randint(2, len(max(self.percentage_list)) - 1)\r\n\r\n if max(self.percentage_list)[0] >= 60: # The question needs to be at least 65% right\r\n self.answer = max(self.percentage_list)[random_index]\r\n print(self.answer)\r\n\r\n elif len(self.question_match_list) > 0: # Or it needs to contain a specific sentence\r\n length_list = []\r\n for i in self.question_match_list:\r\n length_list.append(len(i[1]))\r\n self.answer = self.question_match_list[length_list.index(max(length_list))][random_index]\r\n print(self.answer)\r\n\r\n elif max(self.percentage_list)[0] >= 40:\r\n print(\"Random responses\")\r\n\r\n with open('Random Responses.csv') as f:\r\n max_n = sum(1 for _line in f)\r\n\r\n random_int = random.randint(1, max_n -1)\r\n with open('Random Responses.csv', mode='r') as csv_file:\r\n csv_reader = csv.DictReader(csv_file)\r\n for i in csv_reader:\r\n self.counter += 1\r\n if random_int == self.counter:\r\n self.answer = (i[\"Responses\"])\r\n print(self.answer)\r\n else:\r\n print(\"I am sorry. 
I don't understand this question\")\r\n\r\n #put answer and question in the file\r\n print()\r\n print(\"if its wrong please write an answer then ENTER\")\r\n print(\"or press ENTER to skip\")\r\n better_questions_answers(self.question)", "def englishtest(result, etaoin_shrdlu=[12.02,9.1,8.12,7.68,7.31,6.95,6.28,6.02,5.92,4.32,3.98,2.88]):\n \n a = len(result)\n single = []\n for i in range(12):\n single.append(9999)\n total = 0\n single[0]= result.count(b'e') + result.count(b'E')\n single[1]= result.count(b't') + result.count(b'T')\n single[2]= result.count(b'a') + result.count(b'A')\n single[3]= result.count(b'o') + result.count(b'O')\n single[4]= result.count(b'i') + result.count(b'I')\n single[5]= result.count(b'n') + result.count(b'N')\n single[6]= result.count(b's') + result.count(b'S')\n single[7]= result.count(b'h') + result.count(b'H')\n single[8]= result.count(b'r') + result.count(b'R')\n single[9]= result.count(b'd') + result.count(b'D')\n single[10]= result.count(b'l') + result.count(b'L')\n single[11]= result.count(b'u') + result.count(b'U')\n\n for i in range(12):\n if single[i] == 0:\n single[i] =100\n else:\n single[i] = single[i]/a\n for i in single:\n total = total + i\n \n return total, single", "def get_answers(self):\r\n pass", "def questionScores():\n rank = [1,2,3,4]\n scores = \"\"\n for x in range(4):\n rand = random.randint(1, 4)\n while rank[rand-1] == 0:\n rand = random.randint(1,4)\n scores += str(rank[rand-1])\n rank[rand-1] = 0\n return scores", "def main_questions(money, grain, people):\n quest_buy = [Q1, Q2, Q3, Q6, Q7]\n question = random.choice(quest_buy)\n print(question)\n answer = input()\n while answer.isdigit() is False:\n print(INPUT_INT_VALUE)\n answer = input()\n answer = int(answer)\n if question == Q1:\n money = money - answer * 12\n elif question == Q2:\n money -= answer * 14\n elif question == Q3:\n money -= answer * 13\n elif question == Q6:\n money -= answer * 10\n elif question == Q7:\n money -= answer * 15\n grain += answer\n\n quest_sell = [Q4, Q5, Q8, Q9, Q10]\n question_2 = random.choice(quest_sell)\n print(question_2)\n answer = input()\n while answer.isdigit() is False:\n print(INPUT_INT_VALUE)\n answer = input()\n answer = int(answer)\n if question == Q4:\n money += answer * 7\n elif question == Q5:\n money += answer * 5\n elif question == Q8:\n money += answer * 6\n elif question == Q9:\n money += answer * 9\n elif question == Q10:\n money += 8\n grain -= answer\n\n print(DISTRIBUTION_OF_GRAIN)\n answer_3 = input()\n while answer_3.isdigit() is False:\n print(INPUT_INT_VALUE)\n answer_3 = input()\n answer_3 = int(answer)\n grain -= answer_3\n if grain / people > 90:\n people *= 1.1\n elif grain / people < 40:\n people *= 0.9\n return int(money), int(grain), int(people)", "def get_hint(self, data):\r\n # First, validate our inputs.\r\n try:\r\n answer = self.answer_to_str(data)\r\n except (ValueError, AttributeError):\r\n # Sometimes, we get an answer that's just not parsable. 
Do nothing.\r\n log.exception('Answer not parsable: ' + str(data))\r\n return\r\n if not self.validate_answer(answer):\r\n # Answer is not in the right form.\r\n log.exception('Answer not valid: ' + str(answer))\r\n return\r\n if answer not in self.user_submissions:\r\n self.user_submissions += [answer]\r\n\r\n # For all answers similar enough to our own, accumulate all hints together.\r\n # Also track the original answer of each hint.\r\n matching_answers = self.get_matching_answers(answer)\r\n matching_hints = {}\r\n for matching_answer in matching_answers:\r\n temp_dict = copy.deepcopy(self.hints[matching_answer])\r\n for key, value in temp_dict.items():\r\n # Each value now has hint, votes, matching_answer.\r\n temp_dict[key] = value + [matching_answer]\r\n matching_hints.update(temp_dict)\r\n # matching_hints now maps pk's to lists of [hint, votes, matching_answer]\r\n\r\n # Finally, randomly choose a subset of matching_hints to actually show.\r\n if not matching_hints:\r\n # No hints to give. Return.\r\n return\r\n # Get the top hint, plus two random hints.\r\n n_hints = len(matching_hints)\r\n hints = []\r\n # max(dict) returns the maximum key in dict.\r\n # The key function takes each pk, and returns the number of votes for the\r\n # hint with that pk.\r\n best_hint_index = max(matching_hints, key=lambda pk: matching_hints[pk][1])\r\n hints.append(matching_hints[best_hint_index][0])\r\n best_hint_answer = matching_hints[best_hint_index][2]\r\n # The brackets surrounding the index are for backwards compatability purposes.\r\n # (It used to be that each answer was paired with multiple hints in a list.)\r\n self.previous_answers += [[best_hint_answer, [best_hint_index]]]\r\n for _ in xrange(min(2, n_hints - 1)):\r\n # Keep making random hints until we hit a target, or run out.\r\n while True:\r\n # random.choice randomly chooses an element from its input list.\r\n # (We then unpack the item, in this case data for a hint.)\r\n (hint_index, (rand_hint, _, hint_answer)) =\\\r\n random.choice(matching_hints.items())\r\n if rand_hint not in hints:\r\n break\r\n hints.append(rand_hint)\r\n self.previous_answers += [[hint_answer, [hint_index]]]\r\n return {'hints': hints,\r\n 'answer': answer}", "def populate_score_matrices(self):\n\n ### FILL IN ###", "def calculate_questions_grade():\n import numpy as np\n with open('qqq.csv') as fo:\n headers_list = fo.readline().strip().split(',')\n headers_list.append('questions_grade')\n data_lines = []\n scores = []\n \n for line in fo:\n splits = [f.strip() for f in line.split(',')]\n # splits = [int(s) if get_type(s) is int else s for s in splits]\n scores.append(int(splits[-1]))\n data_lines.append(splits)\n\n std_dev = np.std(scores)\n grades = [score*1.0/std_dev for score in scores]\n max_grade = max(grades)\n grades = [g*10/max_grade for g in grades]\n\n graded_lines = [headers_list]\n for key, split_line in enumerate(data_lines):\n split_line.append(round(grades[key], 2))\n graded_lines.append(split_line)\n\n with open('qqq2.csv', 'w') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerows(graded_lines)", "def multi_table(a, b):\n for i in range(1, int(b) + 1):\n print(f'{a} * {i} = {a*i}')", "def __termes_supplementaires(table_1_reduit, table_0, nbr):\n table_terme_supl = []\n msq1 = (1 << nbr)- 1 #*********************************\n for tab in table_1_reduit:\n pt_croix = __pt_facteur_unique(tab, table_0, nbr)\n table_0_reduit_croix = [i ^ tab for i in table_0 if not((i ^ tab) & pt_croix)]\n masque_tr_reduit = __terme_supp(pt_croix, 
table_0_reduit_croix, nbr)\n ter = tab, masque_tr_reduit\n table_terme_supl.append(ter)\n table_compl_reduite = []\n\n for i in table_terme_supl:\n term, t_msq = i\n for msq in t_msq:\n j = term & msq, msq ^ msq1 # normalisation (& msq)****************\n table_compl_reduite.append(j)\n table_compl_reduite = list(set(table_compl_reduite))\n return table_compl_reduite", "def __init__(self, question_txt, question_prefix, col_range):\n super().__init__(question_txt, question_prefix, col_range)\n self.agree_lot_rng = col_range[0]\n self.agree_little_rng = col_range[1]\n self.neither_rng = col_range[2]\n self.dis_little_rng = col_range[3]\n self.dis_lot_rng = col_range[4]", "def arithmetic_arranger(problems, display_answers=False):\n\n ## Separate input in lists check if input conforms to rules\n first_operands = []\n second_operands = []\n operators = []\n\n if len(problems) > 5:\n return 'Error: Too many problems.'\n\n for problem in problems:\n # Check 4 digits max\n first_operand = problem.split()[0]\n operator = problem.split()[1]\n second_operand = problem.split()[2]\n if len(first_operand) > 4 or len(second_operand) > 4:\n return 'Error: Numbers cannot be more than four digits.'\n break\n # Separate operands and check only digits\n try:\n first_operands.append(int(first_operand))\n second_operands.append(int(second_operand))\n except ValueError:\n return 'Error: Numbers must only contain digits.'\n # Separate operators\n if operator == '*' or operator == '/':\n return \"Error: Operator must be '+' or '-'.\"\n else:\n operators.append(operator)\n\n ## Perform the calculation(s)\n results = []\n\n for count in range(len(first_operands)):\n if operators[count] == '+':\n results.append(first_operands[count] + second_operands[count])\n else:\n results.append(first_operands[count] - second_operands[count])\n\n ## Draw the arranged problems\n \n for count, item in enumerate(first_operands):\n if len(str(first_operands[count])) > len(str(second_operands[count])):\n max_operand_length = len(str(first_operands[count]))\n else:\n max_operand_length = len(str(second_operands[count]))\n \n if count == 0:\n first_line = ' ' + (max_operand_length - len(str(first_operands[count]))) * ' ' + str(first_operands[count])\n second_line = operators[count] + ' ' + (max_operand_length - len(str(second_operands[count]))) * ' ' + str(second_operands[count])\n third_line = '--' + max_operand_length * '-'\n fourth_line = (max_operand_length + 2 - len(str(results[count]))) * ' ' + str(results[count])\n else:\n first_line += ' ' + ' ' + (max_operand_length - len(str(first_operands[count]))) * ' ' + str(first_operands[count])\n second_line += ' ' + operators[count] + ' ' + (max_operand_length - len(str(second_operands[count]))) * ' ' + str(second_operands[count])\n third_line += ' ' + '--' + max_operand_length * '-'\n fourth_line += ' ' + (max_operand_length + 2 - len(str(results[count]))) * ' ' + str(results[count])\n\n arranged_problems = first_line + '\\n' + second_line + '\\n' + third_line\n\n ## Return the arranged problems\n\n if display_answers:\n arranged_problems += '\\n' + fourth_line\n\n return arranged_problems", "def evaluate_no_answers(self):\n answer_range = self.answer_range\n feature_range = self.feature_range\n repetitions = self.repetitions\n\n df_cleaned_bin = pd.read_csv(self.path_bin)\n df_answers_grouped = pd.read_pickle(self.path_answers_clean_grouped)\n df_actual_metadata = pd.read_csv(self.path_answers_metadata, index_col=0, header=[0, 1])\n df_actual_metadata = df_actual_metadata['actual']\n\n 
# # feature_range = [2,3]\n # # answer_range = [2,10]\n # repetitions=5\n\n result = {}\n for no_answers in answer_range:\n print('calculating. number of answers: ', no_answers)\n evaluator = ERNofeaturesEvaluator(None, None, df_cleaned_bin, df_actual_metadata=df_actual_metadata, target=self.target, dataset_name=self.dataset_name, df_answers_grouped=df_answers_grouped, bootstrap_n=no_answers, repetitions=repetitions, replace=False)\n raw_data = evaluator.evaluate(feature_range, condition=ERCondition.CSFS) # raw_data is dict: {CONDITION: {NOFEATURES: [AUCS]}}\n result[no_answers] = raw_data[ERCondition.CSFS]\n\n # result is dict: {no_answers: {NOFEATURES: [AUCS]}}\n result_restructured = dict()\n for no_features in feature_range:\n result_restructured[no_features] = {no_answers: result[no_answers][no_features] for no_answers in answer_range}\n # {no_features: {no_answers: result[no_answers][no_features]} for no_features in feature_range for no_answers in answer_range }\n result = result_restructured # { 2 features: {2answers: [], 3 answers: [], 4 answers: [],...}, 3 features: [2answers:[], 3answers:[]},...}\n\n # print(result)\n data_aggregated = dict()\n for no_features in result:\n print('aggregating. number of features: ', no_features)\n data = {\n 'mean': [np.mean(result[no_features][no_answers]) for no_answers in answer_range],\n 'ci_lo': [ssw.DescrStatsW(result[no_features][no_answers]).tconfint_mean()[0] for no_answers in answer_range],\n 'ci_hi': [ssw.DescrStatsW(result[no_features][no_answers]).tconfint_mean()[1]for no_answers in answer_range],\n 'std': [np.std(result[no_features][no_answers]) for no_answers in answer_range],\n }\n\n df = pd.DataFrame(data)\n # print(no_features)\n # print(tabulate(df))\n data_aggregated[no_features] = df\n df_combined = pd.concat(data_aggregated, axis='columns')\n # exit()\n df_combined.index = answer_range\n df_combined.to_pickle(self.path_no_answers_vs_auc)", "def solveMontyHall(num):\n\n\t# the number that the first choice is the anwser\n\tNumFirst = 0\n\t# the number that the switched choice is the anwser\n\tNumSwitch = 0\n\n\tfor i in xrange(num):\n\t\t# Set randomly the index of the prize and first choice\n\t\tprizeIndex = random.randint(0, 2)\n\t\tfirstChoiceIndex = random.randint(0, 2)\n\n\t\t# insist on the first choice \n\t\tif firstChoiceIndex == prizeIndex:\n\t\t\tNumFirst += 1\n\t\telse: # failure\n\t\t\tpass\n\n\t\t# choose to switch \n\t\tif firstChoiceIndex == prizeIndex:\n\t\t\tpass # failure\n\t\telse: # means you get the right answer\n\t\t\tNumSwitch += 1\n\n\treturn (NumFirst / float(num), NumSwitch / float(num))", "def process_question(qu):\n\n ## global ranking\n rank_info = {}\n rank_info_k = [\"viewcount\",\"score\",\"favoritecount\"]\n for k in rank_info_k:\n rank_info[k] = int(qu[k])\n qu.pop(k,None)\n\n rank_info[\"creationdate\"] = qu[\"creationdate\"]\n\n if qu[\"acceptedanswer\"]:\n qu[\"acceptedanswer\"] = list(qu[\"acceptedanswer\"])\n else:\n qu[\"acceptedanswer\"] = []\n\n qu.pop('comments',None) # discard comments, maybe add back later\n qu[\"rank_info\"] = rank_info\n\n return qu", "def display_possible_answers(question):\n answers = question['incorrect'] + [question['correct']]\n random.shuffle(answers)\n answer_dict = {}\n for i, answer in enumerate(answers):\n answer_dict[str(i + 1)] = answer\n print(f\"{i + 1}: {answer}\\n\")\n return answer_dict", "def ttlist(qa,a1a,a0a,ans1,ans0,sentences):\n (questions,answers1,answers0)=sentences\n li=[]\n ones=0\n zeros=0\n for i in range(0,len(ans1)):\n 
li.append(q(qa[i],a1a[ones:ones+ans1[i]],a0a[zeros:zeros+ans0[i]],questions[i],\n answers1[ones:ones+ans1[i]],answers0[zeros:zeros+ans0[i]]))\n ones+=ans1[i]\n zeros+=ans0[i]\n return li", "def build_matrix(prompt_size, prompt_matrix):\n\n rows, cols = [int(i) for i in get_num_row(\n 2,\n lambda x: int(x),\n prompt=prompt_size\n )]\n matrix = []\n\n print(prompt_matrix)\n for i in range(rows):\n row = get_num_row(cols, lambda x: float(x))\n matrix.append(row)\n\n return matrix", "def make_table(m, n):\n return [[0] * n for _ in range(m)]", "def similarityQuestions(self, row):\n q1 = self.stemQuestion(row[3])\n q2 = self.stemQuestion(row[4])\n \n # Compute similarity of the two questions#\n sim = seqmatch(None, q1, q2).ratio()\n #sim = self.computeSimilarity(q1, q2)\n if sim > 0.6: #we guess they are duplicate questions\n if row[5] == \"1\": #true positive\n self.tp += 1\n else: #false positive\n self.fp += 1\n else: #we guess they are different questions\n if row[5] == \"0\": #true negative\n self.tn += 1\n else: #false negative\n self.fn += 1", "def input_tables_01(data_in, nbr_variable):\n terme_0 = set()\n terme_1 = set()\n erreur = False\n # par défaut on commence à lire les termes 1\n type_terme = 1\n while len(data_in) != 0:\n rep = data_in.pop()\n if rep == \"t1\":\n type_terme = 1\n elif rep == \"t0\":\n type_terme = 0\n else:\n terme, erreur = acqui_terme(rep, nbr_variable)\n if erreur:\n terme_0 = set()\n terme_1 = set()\n break\n if type_terme == 0:\n terme_0.add(terme)\n else:\n terme_1.add(terme)\n return (list(terme_1), list(terme_0), erreur) # liste normalisée", "def get_score(self, student_answers):\r\n _ = self.capa_system.i18n.ugettext\r\n\r\n log.debug('%s: student_answers=%s', unicode(self), student_answers)\r\n\r\n # ordered list of answer id's\r\n idset = sorted(self.answer_ids)\r\n try:\r\n # ordered list of answers\r\n submission = [student_answers[k] for k in idset]\r\n except Exception as err:\r\n msg = u\"[courseware.capa.responsetypes.customresponse] {message}\\n idset = {idset}, error = {err}\".format(\r\n message= _(\"error getting student answer from {student_answers}\").format(student_answers=student_answers),\r\n idset=idset,\r\n err=err\r\n )\r\n\r\n log.error(\r\n \"[courseware.capa.responsetypes.customresponse] error getting\"\r\n \" student answer from %s\"\r\n \"\\n idset = %s, error = %s\",\r\n student_answers, idset, err\r\n )\r\n raise Exception(msg)\r\n\r\n # global variable in context which holds the Presentation MathML from dynamic math input\r\n # ordered list of dynamath responses\r\n dynamath = [student_answers.get(k + '_dynamath', None) for k in idset]\r\n\r\n # if there is only one box, and it's empty, then don't evaluate\r\n if len(idset) == 1 and not submission[0]:\r\n # default to no error message on empty answer (to be consistent with other\r\n # responsetypes) but allow author to still have the old behavior by setting\r\n # empty_answer_err attribute\r\n msg = (u'<span class=\"inline-error\">{0}</span>'.format(_(u'No answer entered!'))\r\n if self.xml.get('empty_answer_err') else '')\r\n return CorrectMap(idset[0], 'incorrect', msg=msg)\r\n\r\n # NOTE: correct = 'unknown' could be dangerous. 
Inputtypes such as textline are\r\n # not expecting 'unknown's\r\n correct = ['unknown'] * len(idset)\r\n messages = [''] * len(idset)\r\n overall_message = \"\"\r\n\r\n # put these in the context of the check function evaluator\r\n # note that this doesn't help the \"cfn\" version - only the exec version\r\n self.context.update({\r\n # my ID\r\n 'response_id': self.id,\r\n\r\n # expected answer (if given as attribute)\r\n 'expect': self.expect,\r\n\r\n # ordered list of student answers from entry boxes in our subtree\r\n 'submission': submission,\r\n\r\n # ordered list of ID's of all entry boxes in our subtree\r\n 'idset': idset,\r\n\r\n # ordered list of all javascript inputs in our subtree\r\n 'dynamath': dynamath,\r\n\r\n # dict of student's responses, with keys being entry box IDs\r\n 'answers': student_answers,\r\n\r\n # the list to be filled in by the check function\r\n 'correct': correct,\r\n\r\n # the list of messages to be filled in by the check function\r\n 'messages': messages,\r\n\r\n # a message that applies to the entire response\r\n # instead of a particular input\r\n 'overall_message': overall_message,\r\n\r\n # any options to be passed to the cfn\r\n 'options': self.xml.get('options'),\r\n 'testdat': 'hello world',\r\n })\r\n\r\n # Pass DEBUG to the check function.\r\n self.context['debug'] = self.capa_system.DEBUG\r\n\r\n # Run the check function\r\n self.execute_check_function(idset, submission)\r\n\r\n # build map giving \"correct\"ness of the answer(s)\r\n correct = self.context['correct']\r\n messages = self.context['messages']\r\n overall_message = self.clean_message_html(self.context['overall_message'])\r\n correct_map = CorrectMap()\r\n correct_map.set_overall_message(overall_message)\r\n\r\n for k in range(len(idset)):\r\n npoints = self.maxpoints[idset[k]] if correct[k] == 'correct' else 0\r\n correct_map.set(idset[k], correct[k], msg=messages[k],\r\n npoints=npoints)\r\n return correct_map", "def get_answers(self):\r\n return self.answer_values", "def question_2(patient):\n result = {}\n for disease, symptoms in patient.symptoms.iteritems():\n symptoms_list = generate_all_symptoms(symptoms)\n if not symptoms_list: # there are no unknowns\n symptoms_list = [symptoms]\n max_prob = 0.0\n min_prob = 1.0\n for sym_list in symptoms_list:\n prob = calculate_probability(disease, sym_list)\n if prob > max_prob:\n max_prob = prob\n if prob < min_prob:\n min_prob = prob\n min_str = \"%.4f\" % min_prob\n max_str = \"%.4f\" % max_prob\n result[disease.name] = [min_str, max_str]\n patient.set_max_prob(disease, max_prob)\n patient.set_min_prob(disease, min_prob)\n return result", "def test_metric(self, qset: Iterator[Tuple[str, float]]) -> Dict[str, float]:\n res = dict(mks0=0.0, mks1=0.0, mks2=0.0, sum_weights=0.0, sum_wlen=0.0, n=0)\n hist = {k: {} for k in {\"mks0\", \"mks1\", \"mks2\", \"l\"}} # pylint: disable=C0208\n wei = {k: {} for k in hist}\n res[\"hist\"] = hist\n res[\"histnow\"] = wei\n\n for el, _ in self.enumerate_test_metric(qset):\n le = len(el.value)\n w = el.weight\n res[\"mks0\"] += w * el.mks0\n res[\"mks1\"] += w * el.mks1\n res[\"mks2\"] += w * el.mks2\n res[\"sum_weights\"] += w\n res[\"sum_wlen\"] += w * le\n res[\"n\"] += 1\n\n if el.mks0 not in hist[\"mks0\"]:\n hist[\"mks0\"][el.mks0] = w\n wei[\"mks0\"][el.mks0] = 1\n else:\n hist[\"mks0\"][el.mks0] += w\n wei[\"mks0\"][el.mks0] += 1\n if el.mks1 not in hist[\"mks1\"]:\n hist[\"mks1\"][el.mks1] = w\n wei[\"mks1\"][el.mks1] = 1\n else:\n hist[\"mks1\"][el.mks1] += w\n wei[\"mks1\"][el.mks1] += 1\n 
if el.mks2 not in hist[\"mks2\"]:\n hist[\"mks2\"][el.mks2] = w\n wei[\"mks2\"][el.mks2] = 1\n else:\n hist[\"mks2\"][el.mks2] += w\n wei[\"mks2\"][el.mks2] += 1\n if le not in hist[\"l\"]:\n hist[\"l\"][le] = w\n wei[\"l\"][le] = 1\n else:\n hist[\"l\"][le] += w\n wei[\"l\"][le] += 1\n return res", "def quizScores():\n quizScores = []\n for x in range(18):\n quizScores.append(questionScores())\n return quizScores", "def build(self):\r\n matrix = self.create2dArray()\r\n cols = self.get_random_cols(matrix)\r\n # Question 1\r\n #ques = randrange(len(matrix)) \r\n ques = cols[0]\r\n answer = randint(1,(len(matrix[ques][0])-1))\r\n answer1 = matrix[ques][0][0]\r\n ques1 = matrix[ques][0][answer]\r\n # Question 2\r\n #ques = randrange(len(matrix))\r\n ques = cols[1] \r\n answer = randint(1,(len(matrix[ques][0])-1))\r\n answer2 = matrix[ques][0][0]\r\n ques2 = matrix[ques][0][answer]\r\n # Question 3\r\n #ques = randrange(len(matrix))\r\n ques = cols[2] \r\n answer = randint(1,(len(matrix[ques][0])-1))\r\n answer3 = matrix[ques][0][0]\r\n ques3 = matrix[ques][0][answer]\r\n # Question 4\r\n #ques = randrange(len(matrix))\r\n ques = cols[3] \r\n answer = randint(1,(len(matrix[ques][0])-1))\r\n answer4 = matrix[ques][0][0]\r\n ques4 = matrix[ques][0][answer]\r\n # Question 5\r\n #ques = randrange(len(matrix))\r\n ques = cols[4] \r\n answer = randint(1,(len(matrix[ques][0])-1))\r\n answer5 = matrix[ques][0][0]\r\n ques5 = matrix[ques][0][answer]\r\n\r\n open(\"marking_key.txt\",\"w+\").close()\r\n file = open(\"marking_key.txt\",\"w+\")\r\n file.write(answer5+'.'+ques5+'\\n'+answer4+'.'+ques4+'\\n'+answer3+'.'+ques3+'\\n'+answer2+'.'+ques2+'\\n'+answer1+'.'+ques1)\r\n file.close()\r\n\r\n open(\"mark_key.txt\",\"w+\").close()\r\n file = open(\"mark_key.txt\",\"w+\")\r\n file.write(ques5+' '+ques4+' '+ques3+' '+ques2+' '+ques1)\r\n file.close()\r\n\r\n history = self.history()\r\n\r\n oshima = Test(que5=ques4,que4=ques5,que3=ques1,que2=ques3,que1=ques2,\r\n ans1=('5. '+answer1),ans2=('4. '+answer2),ans3=('3. '+answer3),\r\n ans4=('2. '+answer4),ans5=('1. 
'+answer5),que6=history)\r\n oshima.start()\r\n return oshima", "def solution(n, s, a, b, fares):\n\n table = [[float(\"inf\")]*n for _ in range(n)]\n for (c, d, f) in fares:\n table[c-1][d-1] = f\n table[d-1][c-1] = f\n\n for idx in range(n):\n table[idx][idx] = 0\n\n # do floyd to find all shortest paths\n for kdx in range(n):\n for idx in range(n):\n for jdx in range(n):\n table[idx][jdx] = min(table[idx][jdx], table[idx][kdx] + table[kdx][jdx])\n \n# for row in table:\n# print(row)\n \n answer = table[s-1][a-1] + table[s-1][b-1]\n # print(\"seperate:\", answer)\n for idx in range(n):\n # print(\"idx 경유:\", idx, table[s-1][idx] + table[idx][a-1] + table[idx][b-1])\n answer = min(answer, table[s-1][idx] + table[idx][a-1] + table[idx][b-1])\n\n # print(\"answer:\", answer)\n return answer", "def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 49:\n break\n\n return answers", "def get_score(self, student_answers):\r\n student_answer = student_answers[self.answer_id].strip()\r\n correct = self.check_string(self.correct_answer, student_answer)\r\n return CorrectMap(self.answer_id, 'correct' if correct else 'incorrect')", "def report_calc_lattice(self):\n print(\" h k q_obs q_calc\")\n q_calc = np.sqrt(self.calc_q_square())\n for a, b, c, d in zip(self.h, self.k, self.q, q_calc):\n print(\"{0: 1d} {1: 1d} {2: .3f} {3: .3f}\".format(a, b, c, d))", "def init_PQ(self):\n self.curr_k = 0\n self.min_tr_in_k = sys.float_info.max\n self.sum_tr_in_k = 0" ]
[ "0.55837727", "0.5510911", "0.5427294", "0.541556", "0.5354501", "0.5270408", "0.5229382", "0.5213455", "0.52047086", "0.5183162", "0.5158478", "0.5134398", "0.509664", "0.5092691", "0.5080551", "0.50666213", "0.50628793", "0.50590247", "0.5057949", "0.5032783", "0.5013241", "0.5008784", "0.49926734", "0.49782538", "0.49737582", "0.49610308", "0.49488398", "0.494714", "0.49362808", "0.49351496", "0.4931934", "0.48936158", "0.4892003", "0.4875224", "0.48712698", "0.48702535", "0.48696777", "0.4843342", "0.4837223", "0.48369813", "0.48356575", "0.48345068", "0.4818779", "0.48147044", "0.48117924", "0.4810039", "0.4795328", "0.47878394", "0.47833803", "0.47694242", "0.47684887", "0.47679877", "0.4760949", "0.47589526", "0.47586653", "0.47543254", "0.47512436", "0.47334006", "0.47248486", "0.4723305", "0.47189254", "0.47179347", "0.47144675", "0.4706247", "0.47038653", "0.47034463", "0.46984962", "0.4693221", "0.46878657", "0.46838945", "0.46792608", "0.4668485", "0.46650285", "0.46626237", "0.46607322", "0.46602", "0.46575257", "0.46529177", "0.46463656", "0.46461257", "0.46297604", "0.462809", "0.46252507", "0.46221578", "0.4614115", "0.46121398", "0.46090803", "0.46053863", "0.46045494", "0.46033975", "0.45970082", "0.4595165", "0.45898142", "0.4588292", "0.45815066", "0.4579989", "0.45793593", "0.4565726", "0.45637736", "0.4563369", "0.45630044" ]
0.0
-1
gets number of flags nearby
def flags_nearby(self, y, x):
    count = 0
    l = [[ye, xe] for xe in range(
        x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]
    for ye, xe in l:
        if xe >= self.x or ye >= self.y:
            continue
        if self.table_state[ye][xe] == Minesweeper.FLAG:
            count += 1
    return str(count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_neighbor_flags(self, i, j):\n return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])", "def get_flag_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM flags\")\n return done", "def count_neighbor_flags(self, x, y):\n\t\treturn sum(self.marks[n][m] == FLAG for (n, m) in self.get_valid_neighbors(x, y))", "def __len__(self):\n\n return len(self.flags)", "def countCmdLineFlags(options, flag):\n counter = 0\n # make sure only flag was supplied\n for key, value in options.__dict__.items():\n if key == flag:\n next\n # If anything but flag was called, increment\n elif value:\n counter += 1\n\n return counter", "def flags_decomposer(flags):\n l = 0\n \n if flags & 2 ** 1:\n l = 1\n \n if flags & 2 ** 4:\n l = 2\n \n return l", "def getting_flags_locations(self):\n print(self.flags)\n self.line_finder.find_line(self.html)", "def count_set_bits(bitmap):\n bmp = bitmap\n count = 0\n n = 1\n while bmp > 0:\n if bmp & 1:\n count += 1\n bmp = bmp >> 1\n n = n + 1\n return count", "def count():", "def count_lead_zs(self,x):\n display_mask = 1 << 31\n cnt = 0\n for c in xrange(1,33):\n if((x & display_mask) == 0):\n cnt += 1\n else:\n return cnt\n x <<= 1\n return cnt", "def number_bits_in_cardinality(self,card):\n return 32 - self.count_lead_zs(card)", "def __int__(self):\n\n return self.bitflags", "def getHitCount(self): #$NON-NLS-1$\r", "def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)", "def count_flag(cls, flags: dict, flag: str, current_tensor: tvm.tensor.Tensor) -> None:\n # Always add 1 count for total_flag when there is a flag\n cls.add_flag(flags, \"total\", current_tensor)\n found_flag = False\n # Add 1 count for corresponding flag\n for key in flags:\n if flag is None or key is None:\n continue\n if flag.find(key) >= 0:\n found_flag = True\n cls.add_flag(flags, key, current_tensor)\n # When there isn't a match, if flag is empty, add 1 for None, or add 1 for unknown\n if not found_flag:\n if flag is None or flag == \"\":\n cls.add_flag(flags, None, current_tensor)\n else:\n cls.add_flag(flags, \"unknown\", current_tensor)", "def count_masked_pixel(skymap):\n return len(skymap[skymap == 1.0])", "def flags(self,index):\n return self._flags", "def example_count_set_bits(value):\n n = 0\n while value:\n n += 1\n value &= value-1\n return n", "def __len__(self):\n i = 0\n for S in self.states():\n i += 1\n return i", "def hive_flags(self):\n return self.unpack_dword(0x8)", "def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):\n split_inside_flags = torch.split(inside_flags, num_level_anchors)\n num_level_anchors_inside = [\n int(flags.sum()) for flags in split_inside_flags\n ]\n return num_level_anchors_inside", "def _flags(self):\n done, data = self._request('GE')\n if done:\n flags = int(data[1], 16)\n else:\n raise EvseError\n return {\n 'service_level': (flags & 0x0001) + 1,\n 'diode_check': not flags & 0x0002,\n 'vent_required': not flags & 0x0004,\n 'ground_check': not flags & 0x0008,\n 'stuck_relay_check': not flags & 0x0010,\n 'auto_service_level': not flags & 0x0020,\n 'auto_start': not flags & 0x0040,\n 'serial_debug': not not flags & 0x0080,\n 'lcd_type': 'monochrome' 
if flags & 0x0100 else 'rgb',\n 'gfi_self_test': not flags & 0x0200\n }", "def easy_count_set_bits(num):\n print('Counted {} set bits'.format(bin(num).count('1')))", "def number_bites_accessed(self) -> int:\r\n accessed_bites = {\r\n row['bite']\r\n for row in self.rows\r\n }\r\n\r\n return len(accessed_bites)", "def flags(self):\n return self._flags", "def numAtoms(self, flag=None):\n\n return len(self._getSubset(flag)) if flag else self._n_atoms", "def number_of_bits(self):\n return self.numbits", "def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):\n\n split_inside_flags = torch.split(inside_flags, num_level_anchors)\n num_level_anchors_inside = [\n int(flags.sum()) for flags in split_inside_flags\n ]\n return num_level_anchors_inside", "def user_iflags_size(*args):\n return _ida_hexrays.user_iflags_size(*args)", "def remaining(self):\n\t\tmines = sum(1 for _ in self.get_mines())\n\t\tmarked = sum(1 for x in range(self.width)\n\t\t\t\t\t for y in range(self.height) if self.marks[x][y] == FLAG)\n\t\treturn mines - marked", "def number_of_bits(self) -> int:\n raise NotImplementedError('To be Overidden by the derived class')", "def count(self):\n return sum([self.bits[x][y] for x in range(self.n_rows)\n for y in range(self.n_columns)])", "def count_num_masked_tiles(subgrid):\n\n\tnum_masked_tiles = 0\n\tfor tile in subgrid:\n\t\tif (tile == MaskedTile.MASKED) or (tile == MaskedTile.FLAG):\n\t\t\tnum_masked_tiles += 1\n\n\treturn num_masked_tiles", "def get_bitmask(self):\r\n return self.__bitmask__", "def get_control_count(cmd):\n return len(cmd.control_qubits)", "def getNbrOfBit(self):\n return DPxGetDinNumBits()", "def hive_flags(self):\n return self.unpack_dword(0x90)", "def _get_nr_of_bits(self):\n return sum(self._size_var)", "def get_num_landmarks(self):\n return len(self.landmarks_info)", "def flags(self) -> UserFlag:", "def get_flags(self):\n return self.short_flag, self.long_flag", "def number_of_atoms_within_radius(self, distance_cutoff):\n n_atoms = 0\n atom_ids = []\n for contact in self.nearby_atoms:\n other_id = contact.atom_id_no_altloc()\n if (not other_id in atom_ids):\n if (contact.distance() < distance_cutoff):\n n_atoms += 1\n atom_ids.append(other_id) # check for alt confs.\n return n_atoms", "def __len__(self):\r\n return numBits(self.n)", "def get_flags(self):\n\n return self._flags", "def get_length(binary_mask):\n mask_T = binary_mask.T\n\n tip_index = get_index_of_tip(mask_T)\n shoulder_index = get_index_of_shoulder(mask_T)\n\n return shoulder_index - tip_index", "def _getNumcam( self, bSeed ):\n\n\t\treturn ( ( bSeed >> 20 ) & 0xF ) + 1", "def number_of_backbone_oxygens(self, distance_cutoff=3.0):\n n_bb_ox = 0\n for contact in self.nearby_atoms :\n if (contact.atom_name() == \"O\"):\n if (contact.distance() <= distance_cutoff):\n if (not contact.resname() in WATER_RES_NAMES):\n n_bb_ox += 1\n return n_bb_ox", "def get_number_of_atoms_to_optimize(self):\n v = self.c.get(simulation_cell=True)\n return len(v.data.stoichiometry)", "def get_max_num_cr(gdq_cube, jump_flag): # pragma: no cover\n cr_flagged = np.empty(gdq_cube.shape, dtype=np.uint8)\n cr_flagged[:] = np.where(np.bitwise_and(gdq_cube, jump_flag), 1, 0)\n max_num_cr = cr_flagged.sum(axis=0, dtype=np.int32).max()\n del cr_flagged\n\n return max_num_cr", "def count_ones(n):\n s = 0\n mask = 1\n for i in xrange(16):\n if (mask << i) & n:\n s += 1\n return s", "def count_oob(cube):\n out_range = (cube > 4).any(1) | (cube < -4).any(1)\n out_range = out_range.sum() / cube.shape[0]\n return 
out_range", "def get_bitsize(self) -> int:\n return self._surface.get_bitsize()", "def stats(self):\n nqbits = self.operator.num_qubits", "def _get_maskLength(self):\n return self.__maskLength", "def flags(self):\n return self.__flag_set", "def NumBits(self):\n num_bits = 8*len(self.output)\n if self.out_boff % 8:\n num_bits -= 8\n num_bits += self.out_boff\n if num_bits < 0:\n print \"What the...\"\n return num_bits", "def bit_length(self, ???):", "def count(bits: int) -> int:\n return len(to_list(bits)) # I'm lazy", "def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres", "def count_points(roi):\r\n # Performing Mean Shift Filtering\r\n shifted = cv2.pyrMeanShiftFiltering(roi, 21, 51)\r\n\r\n # Converting the image to grayscale\r\n gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)\r\n\r\n # Thresholding using Binary and OTSU\r\n thrsh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\r\n # Using Watershed Algorithm\r\n D = ndimage.distance_transform_edt(thrsh)\r\n localMax = peak_local_max(D, indices=False, min_distance=1, labels=thrsh)\r\n markers = ndimage.label(localMax)[0]\r\n lbls = watershed(-D, markers, mask=thrsh)\r\n \r\n return lbls, len(np.unique(lbls)) - 1", "def count_significant_bits(input_x: int) -> int:\n x = input_x\n for i in range(x.bit_length()):\n if x & (1 << i) > 0:\n return x.bit_length() - i\n return 0", "def get_marble_count(self):", "def len_pref(self, nmsk):\n if \"01\" not in nmsk:\n return nmsk.count(\"1\")\n else:\n return -1", "def flags(self) -> Optional[int]:\n return self.get(\"/Ff\")", "def number_bites_resolved(self) -> int:\r\n resolved_bites = {\r\n row['bite']\r\n for row in self.rows\r\n if row['completed'] == 'True'\r\n }\r\n\r\n return len(resolved_bites)", "def timerCount(cmds):\n return int(sum(np.asarray(cmds) == 0x400001)) # numpy version\n #return cmds.count(0x400001) # python list version", "def GetAGWFlags(self):\r\n \r\n return self._agwFlags", "def __len__(self):\n\t\treturn len(self._idle) + len(self._running)", "def num_bin(N, places=8):\n return [(N >> k) & 0x1 for k in range(places)]", "def get_lengths_from_binary_sequence_mask(mask: torch.Tensor):\n return mask.long().sum(-1)", "def listFlag(flaglist):\n flag = 0\n for index, item in enumerate(flaglist):\n flag = setFlag(flag, index, item)\n return flag", "def num_clbits(self):\n return 0", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def osd(counts):\n return (counts!=0).sum(), (counts==1).sum(), (counts==2).sum()", "def get_all_flags(options):\n flags = []\n if options.inputFlag:\n flags.append(try_to_int(options.inputFlag))\n if options.outputFlags:\n for flag in options.outputFlags:\n flags.append(try_to_int(flag))\n return flags", "def checkNumNeighbors():", "def num_atoms(self):\n return self.h5['{}/{}'.format(SETTINGS, N_ATOMS)][()]", "def get_lengths_from_binary_sequence_mask(\n mask: torch.BoolTensor,\n) -> torch.LongTensor:\n return mask.sum(-1)", "def obstacle_count(self):\n #scan area in front of robot\n self.scan()\n #Figure ot how many obstacles there were\n see_an_object = False\n count = 0", "def read_flags():\n return flag_args", "def get_flagged(self):\n self.cur.execute(\"SELECT video_ID FROM flags\")\n flagged_IDs = []\n for ID in self.cur.fetchall():\n flagged_IDs.append(ID[0])\n return flagged_IDs", "def numfans():\n click.echo(_wrapper_get_num_fans())", "def 
bits(self):\n return self._q.bit_length()", "def n_bits(self):\n return self._n_bits", "def count_segments(markers) -> int:\n cnt = Counter()\n for row in markers:\n cnt.update(row)\n n_cnt = dict(takewhile(lambda x: x[1] >= 10, cnt.most_common()))\n del n_cnt[1]\n del n_cnt[-1]\n return len(n_cnt.keys())", "def EvaluateFlags(flags: int) -> Dict[str, bool]:\n results = {}\n sumFlags = 0\n for flagName, flagMaskVal in RSEGeometryFlags.FLAG_MASKS.items():\n sumFlags += flagMaskVal\n if flagMaskVal & flags > 0:\n results[flagName] = True\n else:\n results[flagName] = False\n\n if flags > sumFlags:\n results[\"UnevaluatedFlags\"] = True\n else:\n results[\"UnevaluatedFlags\"] = False\n\n return results", "def __len__(self):\n return sum(l for l, op,in self.items() \\\n if op in Cigar.read_consuming_ops)", "def count(self, e):\n try:\n return self.vals[e]\n except:\n return 0", "def _bit_storing_size(n):\n return -((-n) // 8)", "def flags(self):\n return self.ast_node.flags", "def __len__(self):\n return self._used - self._deleted", "def shitty_count_set_bits(num:int) -> int:\n count = 0\n while num != 0:\n count += num & 1\n num >>= 1 # heh\n return count", "def BitsRemaining(self):\n return self.NumBits() - (8*self.idx_byte + self.idx_boff) - 1", "def get_correct_lap_count(self):", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def get_repetition_flags(self):\n day_flags = 0\n for i, box in enumerate((self._timer_mo_check_button,\n self._timer_tu_check_button,\n self._timer_we_check_button,\n self._timer_th_check_button,\n self._timer_fr_check_button,\n self._timer_sa_check_button,\n self._timer_su_check_button)):\n\n if box.get_active():\n day_flags = day_flags | (1 << i)\n\n return day_flags", "def bit_length(self): # real signature unknown; restored from __doc__\n pass" ]
[ "0.66515917", "0.6642287", "0.6426211", "0.6261821", "0.61500776", "0.60658044", "0.6009201", "0.5997175", "0.5990798", "0.5918034", "0.5895515", "0.5893722", "0.5876723", "0.58467567", "0.5845648", "0.5832519", "0.5824113", "0.5818109", "0.5780171", "0.57786083", "0.5770653", "0.57540923", "0.5729343", "0.57201636", "0.5685142", "0.56758755", "0.56720626", "0.5650433", "0.56481284", "0.56459904", "0.5644716", "0.56367195", "0.56320137", "0.56183785", "0.5611765", "0.56000286", "0.557629", "0.55696106", "0.55692357", "0.5566825", "0.55436194", "0.5540876", "0.55259854", "0.55256575", "0.5521703", "0.551284", "0.550796", "0.5502566", "0.54982096", "0.54965276", "0.5487259", "0.54813766", "0.5476836", "0.54762477", "0.5468747", "0.5464073", "0.5457037", "0.5456355", "0.54458493", "0.54454863", "0.54422987", "0.5436023", "0.5434793", "0.54317665", "0.54313135", "0.5430251", "0.5425259", "0.5424389", "0.5421609", "0.5401713", "0.5399596", "0.5398031", "0.5396242", "0.5396242", "0.5396242", "0.5396242", "0.5395003", "0.53908855", "0.5387581", "0.53780895", "0.53768164", "0.53766507", "0.5371941", "0.5369353", "0.536807", "0.5366558", "0.5362822", "0.5359197", "0.5359027", "0.5358489", "0.5357478", "0.5356384", "0.53537714", "0.53475255", "0.5346196", "0.5343339", "0.5342788", "0.5336142", "0.53347725", "0.53318137" ]
0.6600916
2
Open neighbours if the flag number matches the count.
def special_open_neighbours(self, y, x):
    if self.table_state[y][x] != "-" and self.table_state[y][x] == self.flags_nearby(y, x):
        l = [[ye, xe] for xe in range(
            x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]
        for ye, xe in l:
            if xe >= self.x or ye >= self.y:  # do not open out of bounds
                continue
            # if it is a bomb but not flagged
            if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG:
                self.show_answer_board([ye, xe])
                print "KABOOM!"
                return Minesweeper.IS_A_BOMB
        self.open_neighbours(y, x)
        self.print_table(self.table_state)
        return Minesweeper.NOT_A_BOMB
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_open(self, n_faces):\r\n count_used = Counter([item for sublist in self.tiles\r\n for item in sublist\r\n if item in self.get_borders()])\r\n if min(count_used.values()) == n_faces:\r\n self.open = False", "def checkNumNeighbors():", "def count_neighbor_flags(self, i, j):\n return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])", "def count_neighbor_flags(self, x, y):\n\t\treturn sum(self.marks[n][m] == FLAG for (n, m) in self.get_valid_neighbors(x, y))", "def open_neighbours(self, y, x):\n if [y, x] in self.mine_locations:\n return [y, x]\n # generate neighbours with positive indexes\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n # if the indexes are out of the game table, skip\n if xe >= self.x or ye >= self.y:\n continue\n # if the current coordinates are still untouched, update their values\n if self.table_state[ye][xe] == '-':\n self.table_state[ye][xe] = self.final_table[ye][xe]\n # if the coordinate has a value of 0, recursively open it's neighbours.\n if self.final_table[ye][xe] == '0':\n self.open_neighbours(ye, xe)", "def _open_zeros(self, display: bool = False) -> None:\n\n if display:\n print('Before \"Open Zeros\":')\n print(repr(self), \"\\n\")\n\n for pos, space in self._lookup.items():\n if space.hint == '0':\n for neighbor in space.neighbors.values():\n if neighbor and self._lookup[neighbor].hint == '?':\n self._open(*neighbor)\n if display:\n print('After \"Open Zeros\":')\n print(repr(self), \"\\n\")", "def open_neighbour_cells(self, my_board, x, y):\n for _x in range(x-1, x+2):\n for _y in range(y-1, y+2):\n if is_valid(_x, _y):\n if is_new_move(my_board, _x, _y):\n my_board[_x, _y] = self.count_neighbour_mines(_x, _y)\n if my_board[_x, _y] == 0:\n my_board = self.open_neighbour_cells(my_board, _x, _y)\n return my_board", "def open_neighbour_cells(self, my_board, x, y):\n for _x in range(x-1, x+2):\n for _y in range(y-1, y+2):\n if is_valid(_x, _y):\n if is_new_move(my_board, _x, _y):\n my_board[_x, _y] = self.count_neighbour_mines(_x, _y)\n if my_board[_x, _y] == 0:\n my_board = self.open_neighbour_cells(my_board, _x, _y)\n return my_board", "def flags_nearby(self, y, x):\n count = 0\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n if xe >= self.x or ye >= self.y:\n continue\n if self.table_state[ye][xe] == Minesweeper.FLAG:\n count += 1\n return str(count)", "def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count", "def count_islands(grid):\n grid_copy = list(grid)\n count = 0\n for i in range(0, len(grid_copy)):\n for j in range (0, len(grid_copy[0])):\n if grid[i][j] and grid_copy[i][j]:\n _dfs(grid_copy, i, j)\n count += 1\n return count", "def add_to_open(open, neighbour):\n for node in open:\n if neighbour == node and neighbour.f >= node.f:\n # Will not add if there already exists the same node in open that has lower f value\n return False\n\n return True", "def check_neighbours(self, grid):\n if self.bomba:\n self.bombs_around = -1\n return\n\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor 
= grid[i][j]\n\n if neighbor.bomba:\n total += 1\n \n self.bombs_around = total", "def next_alive(neighbors, occupants):\n return bool((occupants == 0 and neighbors == 3) or\n (occupants == 1 and 2 <= neighbors <= 3))", "def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)", "def n_neighbors(self,n):\n return sum(1 for x in self.hex.get_neighbors_ring(n) if x is not None and x.is_occupied == 1)", "def checkNeighbours(data):\n features = 0\n background = 0\n neighbours = [data[0,0],data[0,1],data[0,2],data[1,2],data[2,2],data[2,1],data[2,0],data[1,0]]\n fourConnected = False\n lastPoint = neighbours[-1] #Needed for checking a complete transition cycle\n for n in neighbours:\n if not n:\n features += 1\n elif fourConnected:\n background += 1\n\n fourConnected = not fourConnected\n lastPoint = n\n\n for pos,corner in enumerate(corners):\n if numpy.alltrue(data == corner):\n cornerPos = pos+1\n break\n else:\n cornerPos = 0\n return (features,background,cornerPos)", "def all_bees_raised_flag(self):\n pos, com, success = self.perception\n if len(pos) > 0:\n return all(map(lambda x: x[1][\"flag\"] == (self.nr_of_possible_neighbors + 1), com))\n else:\n return True", "def _check_satisfied_neighborhood(\n recursive_counter: int, stop_recursive: int, matrix_size: int\n) -> bool:\n return recursive_counter >= stop_recursive * (matrix_size ** 2)", "def island():\n\n grid = [\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"1\", \"1\"]\n ]\n\n def dfs():\n rows = len(grid)\n cols = len(grid[0])\n count = 0\n for i in range(0, rows):\n for j in range(0, cols):\n if grid[i][j] == '1':\n check_valid(i, j, grid)\n count = count + 1\n return count\n\n def check_valid(i, j, grid=None):\n rows = len(grid)\n cols = len(grid[0])\n\n if not 0 <= i < rows or not 0 <= j < cols or grid[i][j] != '1':\n return\n\n grid[i][j] = '0'\n\n check_valid(i + 1, j, grid)\n check_valid(i - 1, j, grid)\n check_valid(i, j + 1, grid)\n check_valid(i, j - 1, grid)\n\n return dfs()", "def count_islands(matrix):\n visited = init_visited(matrix)\n num_islands = 0\n for i in range(len(matrix)):\n for j in range(len(matrix)):\n if matrix[i][j] and not visited[i][j]:\n check_neighbours(matrix, (i, j), visited)\n num_islands += 1\n # print(visited)\n return num_islands", "def _is_valid_count(self, count: int, gp: GriddedPerm) -> bool:\n return self._point_in_fuse_region(gp) + 1 == count", "def neighbors_of_8(mapdata, x, y):\n eight_neigh = set()\n # if PathPlanner.is_cell_walkable(mapdata, x, y) == True:\n eight_neigh |= PathPlanner.neighbors_of_4(mapdata,x,y)\n if (PathPlanner.is_cell_walkable(mapdata, x+1, y+1)):\n eight_neigh |= {(x+1,y+1)}\n if (PathPlanner.is_cell_walkable(mapdata, x-1, y-1)):\n eight_neigh |= {(x-1,y-1)}\n if (PathPlanner.is_cell_walkable(mapdata, x+1, y-1)):\n eight_neigh |= {(x+1,y-1)}\n if (PathPlanner.is_cell_walkable(mapdata, x-1, y+1)):\n eight_neigh |= {(x-1,y+1)} \n\n return eight_neigh", "def open(self, xy):\n if xy in self.opened:\n return\n \n self.opened.add(xy)\n if xy in self._mines:\n self.mines_near[xy] = 'mine'\n self.flag(xy) # simplifies playing after death logic\n 
self.lose()\n else:\n self.mines_near[xy] = len(self.neighbours[xy] & self._mines)\n self.flagged.discard(xy)\n self.empty_remaining -= 1\n if self.empty_remaining <= 0:\n self.win()", "def get_neighbors(self, cell, count):\n row, col = cell\n # get all the neighbors\n neighbors = set([(min(self.height - 1, max(row + i, 0)), min(self.width - 1, max(col + j, 0))) \n for i in range(-1, 2)\n for j in range(-1, 2)])\n\n for neighbor in deepcopy(neighbors):\n if neighbor in self.safes or neighbor == cell:\n neighbors.remove(neighbor)\n elif neighbor in self.mines:\n neighbors.remove(neighbor)\n count -= 1\n\n return neighbors, count", "def add_neighbor(self):\n self.fono += 1", "def open_spots(self):\n ret = []\n for i in range(1,25):\n if self.nodes[i].piece == None:\n ret.append(i)\n return ret", "def open_adjacents(self, row, col, opened_tile): \n # Iterates through neighboring tiles, only opening closed tiles adjacent to a zero tile.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n if (self.valid_tile(i, j) and self.tiles[i][j].category == Tiles.closed):\n self.opened += 1\n self.tiles[i][j] = self.board[i][j]\n opened_tile.append(self.board[i][j])\n\n # Checks for a game winning move while opening adjacent tiles.\n if (self.opened + self.mines) == (self.rows * self.cols):\n self.game_won = True\n\n # If an adjacent tile is zero, recursively opens that tile's adjacent tiles.\n if self.board[i][j].category == Tiles.zero:\n self.open_adjacents(i, j, opened_tile)\n\n return opened_tile", "def propagate(possible: np.array, count: ma.array, where: ma.array) -> int:\n while np.equal(count, 1, out=where).any():\n i, j = _neighbors[:, where, :]\n _, k = possible[where, :].nonzero()\n possible[i, j, k[:, np.newaxis]] = False\n if not possible.sum(axis=2, out=count).all():\n return -1 # site with 0 possibility => infeasibility\n count[where] = ma.masked # avoid repetitive work\n return count.count()", "def count_paths((min_i, min_j), (max_i, max_j)):\n\n def explore((i, j), path):\n found = 0\n for (x, y) in neighbors((min_i, min_j), (max_i, max_j), (i, j)):\n if (x, y) == (max_i, max_j):\n found += 1\n debug(\"neighbor %r of node %r on path %r is a goal node: +1\" % ((x, y), (i, j), path))\n elif (x, y) in path: \n debug(\"neighbor %r of node %r already on path %r; ignoring...\" % ((x, y), (i, j), path))\n continue\n else:\n debug(\"neighbor %r of node %r not already on path %r; exploring ...\" % ((x, y), (i, j), path))\n found += explore((x, y), mkpath(path, (x, y)))\n return found\n return explore((0, 0), set([(0, 0)]))", "def neighbors(self, x):\n pass", "def get_flagged_neighbours(self, row, col):\n return [cell for cell in self.get_neighbours(row, col) if cell.state == 'X']", "def floodfill(i, j, row, col, island):\n count = 0\n if island[i][j] == 1:\n island[i][j] = 2\n eightdirections = [(1, 0), (-1, 0), (0, 1), (0, -1),\n (1, 1), (1, -1), (-1, 1), (-1, -1)]\n newpositions = [(i+x, j+y) for x, y in eightdirections]\n for posx, posy in newpositions:\n if posx in range(0, row) and posy in range(0, col):\n floodfill(posx, posy, row, col, island)\n count = 1\n return count", "def num_neighbors(self, num_neighbors):\n self._num_neighbors = num_neighbors", "def dfs(cell):\n r, c = cell\n if (0 <= r < len(grid)) and (0 <= c < len(grid[0])) and (cell not in visited) and (grid[r][c] != 0):\n\n visited.add((r, c)) # save cell\n grid[r][c] = self.num_islands\n # update current island size\n dfs((r, c+1))\n dfs((r+1, c))\n dfs((r-1, c))\n dfs((r, c-1))\n\n else:\n # out of bounds or 
visited\n return", "def _count_living_neighbors(self, cell: Cell) -> int:\n count = 0\n # borders of the area in which we are trying to find neighbors\n # Let's assume y axis directs downside and x axis directs to the left\n \n for x in range(cell.x - 1, cell.x + 2):\n for y in range(cell.y - 1, cell.y + 2):\n if cell.x == x and cell.y == y:\n continue\n if (x, y) in self.living_cells.keys():\n count += 1\n \n return count", "def neighbor(board, x, y, n, m):\n deltas = (\n (-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1),\n )\n count = 0\n for dx, dy in deltas:\n xx = x + dx\n yy = y + dy\n if xx >= 0 and xx < n and yy >= 0 and yy < m and board[xx][yy] % 2 == 1:\n count += 1\n\n return count", "def explore_all_neighbors(z, i):\n\n n = len(z)\n q = [i]\n found = False\n while len(q) > 0:\n i = q.pop()\n\n for j in range(n):\n\n if z[i][j] == 1: # Reset All Neighbours\n z[i][j] = 0\n z[j][i] = 0\n found = True\n if i != j:\n q.append(j)\n\n return found", "def neighbors_of_4(mapdata, x, y):\n four_neigh = set()\n # if PathPlanner.is_cell_walkable(mapdata, x, y) == True:\n if (PathPlanner.is_cell_walkable(mapdata, x+1, y)):\n four_neigh |= {(x+1,y)}\n if (PathPlanner.is_cell_walkable(mapdata, x-1, y)):\n four_neigh |= {(x-1,y)}\n if (PathPlanner.is_cell_walkable(mapdata, x, y+1)):\n four_neigh |= {(x,y+1)}\n if (PathPlanner.is_cell_walkable(mapdata, x, y-1)):\n four_neigh |= {(x,y-1)}\n\n\n return four_neigh", "def toggleFlag(self, event): \n clicked = event.widget\n if clicked.isInPlay(): self.changeSmile(1)\n value = clicked.setFlag()\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n adjTile.numFlags += value\n self.numFlags += value\n self.flagLabel.configure(text=\"Flags: \"+str(self.numFlags))", "def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])", "def play_round_Fredkin_Cell(self):\n for x in self.board:\n for f in x:\n f.live_neighbors = 0\n\n for i in range(1, self.cols - 1):\n for j in range(1, self.rows - 1):\n status = self.board[i][j].status\n assert type(status)==int \n for m in range(i-1 , i +2):\n self.board[m][j].live_neighbors += status\n for n in range(j-1 , j +2):\n self.board[i][n].live_neighbors += status\n\n self.board[i][j].live_neighbors -= status", "def big_fun_search(game, grid_size, pokemon_locations, index):\n queue = [index]\n discovered = [index]\n visible = []\n\n if game[index] == FLAG:\n \treturn queue\n\n number = number_at_cell(game, pokemon_locations, grid_size, index)\n if number != 0:\n return queue\n\n while queue:\n node = queue.pop()\n for neighbour in neighbour_directions(node, grid_size):\n if neighbour in discovered or neighbour is None:\n continue\n\n discovered.append(neighbour)\n if game[neighbour] != FLAG:\n number = number_at_cell(game, pokemon_locations, grid_size, neighbour)\n if number == 0:\n queue.append(neighbour)\n visible.append(neighbour)\n return visible", "def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. 
\n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)", "def fn(i, j, empty):\n nonlocal ans \n if grid[i][j] == 2: \n if empty == -1: ans += 1\n return \n grid[i][j] = -1 # mark as visited \n for ii, jj in (i-1, j), (i, j-1), (i, j+1), (i+1, j): \n if 0 <= ii < m and 0 <= jj < n and grid[ii][jj] != -1: \n fn(ii, jj, empty-1)\n grid[i][j] = 0 # backtracking", "def calc_number_neighbours(num_electrons: int):\r\n if num_electrons < -4 or num_electrons > 4 : \r\n # if number of missing/extra e- higher than 4, then distort 8-num_electrons\r\n num_neighbours = abs(8 - abs(num_electrons) )\r\n elif -4 < num_electrons < 4:\r\n num_neighbours = abs(num_electrons)\r\n elif abs(num_electrons) == 4:\r\n num_neighbours = abs(num_electrons)\r\n \r\n return abs(num_neighbours)", "def set_adjacent_mine_count(self):\n for position in self.grid_coords:\n x, y = position\n if self.grid[y][x] >= 0:\n grid_value = sum(map(self.is_mine, get_adjacent.get_adjacent(position)))\n self.grid[y][x] = grid_value", "def globalNeighbors (listAtom, count):\n\n for atom in listAtom:\n nbNeighbor = numberNeigthbor(atom[\"neighbors\"])\n for neighbor in atom[\"neighbors\"]:\n if not nbNeighbor in count.keys():\n count[nbNeighbor] = structure.countElements()\n\n if neighbor[\"element\"] in count[nbNeighbor].keys():\n count[nbNeighbor][neighbor[\"element\"]] = count[nbNeighbor][neighbor[\"element\"]] + 1\n\n else:\n count[nbNeighbor][\"others\"] = count[nbNeighbor][\"others\"] + 1", "def fn(i):\n for j in range(n):\n if grid[i][j] and not seen[j]: \n seen[j] = True\n if match[j] == -1 or fn(match[j]): \n match[j] = i\n return True \n return False", "def count_island(row, col, island):\n count = 0\n for i in range(row):\n for j in range(col):\n count = count + floodfill(i, j, row, col, island)\n return count", "def big_fun_search(self, grid_size, pokemon_locations, index):\n queue = [index]\n discovered = [index]\n visible = []\n\n if self.get_game()[index] == FLAG:\n return queue\n\n number = self.number_at_cell(pokemon_locations, grid_size, index)\n if number != 0:\n return queue\n\n while queue:\n node = queue.pop()\n for neighbour in self.neighbour_directions(node, grid_size):\n if neighbour in discovered:\n continue\n\n discovered.append(neighbour)\n if self._game_board[neighbour] != FLAG:\n number = self.number_at_cell(pokemon_locations, grid_size, neighbour)\n if number == 0:\n queue.append(neighbour)\n visible.append(neighbour)\n return visible", "def check_ext(im, i, j):\n neighb = 0\n count = 0\n for a in range(8):\n if (im[i+relpos[a][0], j+relpos[a][1]] and (count == 0)):\n count += 1\n neighb += 1\n else:\n count = 0\n return (neighb < 2)", "def _check_neighbors(self):\n for direction, dir_info in self.DIRECTIONS.items():\n pos = 
Point(\n self.position.x + dir_info[\"mask\"][0],\n self.position.y + dir_info[\"mask\"][1]\n )\n status = self.move(direction)\n self.grid[status].add(pos)\n if status in (1, 2):\n # moved\n self.move(dir_info[\"opposite\"])\n yield pos", "def node_in_open_triangle(G, n):\n in_open_triangle = False\n \n # Iterate over all possible triangle relationship combinations\n for n1, n2 in combinations(G.neighbors(n), 2):\n \n # Check if n1 and n2 do NOT have an edge between them\n if not G.has_edge(n1, n2):\n \n in_open_triangle = True\n \n break\n \n return in_open_triangle", "def num_good_neighbors(self, num_good_neighbors):\n self._num_good_neighbors = num_good_neighbors", "def get_neighbours_count(self, cell: Position) -> int:\n possible_neighbours = self.get_neighbours(cell)\n return sum(self.is_alive(n) for n in possible_neighbours)", "def get_neighbor_live_count(cart):\n count = 0\n for i in range(6):\n cart2 = (cart[0] + dxv[i],cart[1] + dyv[i],cart[2] + dzv[i])\n if check_cart(cart2) and voxel_data[cart_to_loc(cart2)] == 1:\n count += 1\n return count", "def checkAmountOfNeighbors(self):\n cellsToDelete = []\n for cell in self.cells:\n if(cell.numOfNeighbor > 3 or cell.numOfNeighbor < 2 or (cell.numOfNeighbor == 2 and cell.dead == True)):\n cellsToDelete.append(cell)\n elif(cell.numOfNeighbor == 3 and cell.dead == True):\n cell.makeAlive()\n cell.numOfNeighbor = 0\n\n self.removeCells(cellsToDelete)", "def test(self, grid, flag):\n x = self.x+SPEED_X[flag]\n y = self.y+SPEED_Y[flag]\n return 0 <= x < self.n and 0 <= y < self.n and grid[y][x] == 1", "def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]", "def update_visited(self):\n\t\tcount = self.visited\n\t\tcount = count + 1\n\t\tself.visited = count", "def solvable(grid):\n y = x = 1\n stack = deque([(0, y, x,)])\n goal = len(grid) - 2\n found = np.ones_like(grid, dtype=bool)\n \n while stack:\n i, y, x = stack.popleft()\n i += 1\n for y2, x2 in solve_perfect.neighbors(y, x, grid):\n if found[y2, x2]:\n if y2 == goal and x2 == goal:\n return i\n else:\n found[y2, x2] = False\n stack.append((i, y2, x2,))\n \n return 0", "def test_can_traverse_tall_grid(self):\n grid = [\n [\"0\"],\n [\"1\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n ]\n result = num_islands(grid)\n self.assertEqual(result, 4)", "def get_open_cells(field: MineField, cell: Cell) -> List[Cell]:\n # TODO re-implement iteratively to safe stack space (especially for large boards)\n if cell.is_flag or cell.is_mine or not field.cell_is_safe(cell):\n return [cell]\n \n if cell.visited:\n return\n\n open_cells: list = list()\n cell.visited = True\n open_cells.append(cell)\n\n for cell in field.surrounding_cells(cell):\n open_cells.append(cell)\n if cell.is_flag or cell.visited or not field.cell_is_safe(cell):\n continue\n\n\n open_cells += get_open_cells(field, cell)\n\n return open_cells", "def update_status(self):\n num_nbrs = len(self.neighbors)\n if not 2 <= num_nbrs <= 3:\n self.status = 0\n elif num_nbrs == 3:\n self.status = 1", "def neighborhood(index, npoints, maxdist=1):\n return [index + i for i in range(-maxdist, maxdist + 1)\n if i != 0 and 0 <= index + i <= npoints - 1]", "def get_neighbour(self, y, x):\n if [y, x] in self.mine_locations:\n return Minesweeper.BOMB\n count = 0\n # (x-1, y-1), (x, y-1), (x+1, y-1),\n # (x-1, y), 
(x, y), (x+1, y),\n # (x-1, y+1), (x, y+1), (x+1, y+1)\n for xe in range(x - 1, x + 2):\n for ye in range(y - 1, y + 2):\n if [ye, xe] in self.mine_locations:\n count += 1\n return str(count)", "def mineNeighbor(self, cell):\n\n # Keep count of nearby mines\n counter = 0\n\n # Loop over all cells within one row and column\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n # Ignore the cell itself\n if (i, j) == cell:\n continue\n\n # Update count if cell in bounds and is mine\n if 0 <= i < self.height and 0 <= j < self.width:\n if self.board[i][j]:\n counter += 1\n\n return counter", "def update_neighbors(self):\n neighbors = []\n for i in range(-1, 2):\n for j in range(-1, 2):\n if (i, j) == (0, 0):\n continue\n try:\n y, x = self.loc[0]+i, self.loc[1]+j\n neighbor = self.board.array[y, x]\n if neighbor > 0:\n neighbors.append(neighbor)\n except:\n continue\n \n self.neighbors = neighbors", "def count_alive_neighbors(self, status):\n kernel = np.array(\n [[1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n\n count = convolve2d(status, kernel, mode='same', boundary=\"wrap\")\n return count", "def living_neighbors(self):\n neighborCount = 0\n for neighbor in self.__neighbors:\n if neighbor.get_living() == True:\n neighborCount += 1\n return neighborCount", "def num_neighbors(self):\n return self._num_neighbors", "def occupied(self):\n self.is_occupied = 1\n for hex in self.fon:\n hex.add_neighbor()\n hex.set_quality()", "def obstacle_count(self):\n #scan area in front of robot\n self.scan()\n #Figure ot how many obstacles there were\n see_an_object = False\n count = 0", "def open(self, i, j):\n if not self.isOpen(i, j):\n # set open to true\n self.arr_open[self._index(i, j)] = True\n # connect to surrounding sites\n [self.qu.union(self._index(i, j), self._index(x[0], x[1]))\n for x in [(i + 1, j), (i - 1, j), (i, j - 1), (i, j + 1)]\n if self.isOpen(x[0], x[1])]", "def neighbor_count(A):\n sum2 = lambda A, B: map2(add, A, B)\n neighbors = ((-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1))\n return reduce(sum2,\n map(lambda d: rotate2(A, d[0], d[1]),\n neighbors))", "def test_count_neighbors(self):\n m, n = 5, 5\n k, p = 0.2, 0.7\n agents = [ConwayAgent(ii, ii & 0x1 == 1) for ii in range(m * n)]\n C = ConwayModel(m, n, k, p, agents)\n\n to_count = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n expected = np.array([[1, 1, 2], [2, 3, 1], [0, 2, 1]])\n result = C.count_neighbors(to_count)\n self.assertTrue(np.all(expected == result))", "def neighbours(number: int, number_sectors: int) -> [int, int, int, int]:\n col = number % number_sectors\n row = number // number_sectors\n\n nieg = [number - number_sectors, number + number_sectors, number - 1, number + 1]\n\n if row == 0:\n nieg[0] = -1\n if row == number_sectors - 1:\n nieg[1] = -1\n if col == 0:\n nieg[2] = -1\n if col == number_sectors - 1:\n nieg[3] = -1\n return nieg", "def dfs_search(board):\n goalcount = 0\n fringe = deque([])\n count = 0\n fringe.append(board)\n while(True):\n if len(fringe) is 0:\n print(\"Empty Fringe\")\n return\n n = fringe.pop()\n # print(n)\n goalcount = goalcount + 1\n if n.goal_test():\n print goalcount\n print count\n return\n column = n.get_next_unassigned_var()\n for val in n.choices[column]:\n count = count+1\n child = nQueens(copy.deepcopy(n.state), copy.deepcopy(n.choices), copy.deepcopy(n.n), n)\n child.assign(column, val)\n fringe.append(child)", "def test_can_traverse_wide_grid(self):\n grid = [[\"1\", \"0\", \"1\", \"1\", \"0\", \"1\", \"0\", \"0\", 
\"1\", \"0\"]]\n result = num_islands(grid)\n self.assertEqual(result, 4)", "def check_neighbours(self):\n for p in self.targetCell.possibilities:\n if p != 0:\n if p not in self.targetCell.row_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.column_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.box_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n return False", "def get_numbered_neighbours(self, row, col):\n return [cell for cell in self.get_neighbours(row, col) if type(cell.state) is int]", "def countFreeNeighbors( p, board, occupation):\n n = 0\n for m in [0, 1]:\n for d in [-1, 1]:\n pn = [p[0], p[1]]\n pn[m] += d\n j = board.grids.get( tuple(pn), None)\n if (j is None): continue # Not a board point\n if (occupation.has_key( j)): continue # Occupied\n n += 1\n return n", "def neighbors(self, cell):\n x = cell.x\n y = cell.y\n for new_x, new_y in [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y)]:\n neighbor = self[new_x, new_y]\n if neighbor is not None:\n yield neighbor", "def _dfs(grid, i, j):\n grid[i][j] = False\n for x in range(i - 1, i + 2):\n for y in range(j - 1, j + 2):\n if (abs((x + y) - (i + j)) == 1) and _is_valid_land(x, y, grid):\n _dfs(grid, x, y)", "def check_neighbours(matrix, cur_pos, visited):\n visited[cur_pos[0]][cur_pos[1]] = True\n\n for i in range(num_of_neighbours):\n cur_neighbour = (cur_pos[0]+neighbours_positions[i][0], cur_pos[1]+neighbours_positions[i][1])\n if is_safe(matrix, cur_neighbour, visited):\n check_neighbours(matrix, cur_neighbour, visited)", "def play_round_Conway_Cell(self):\n for x in self.board:\n for f in x:\n f.live_neighbors = 0\n\n for i in range(1, self.cols - 1):\n for j in range(1, self.rows - 1):\n status = self.board[i][j].status\n assert type(status)==int \n\n for m in range(i - 1, i + 2):\n for n in range(j - 1, j + 2):\n self.board[m][n].live_neighbors += status\n self.board[i][j].live_neighbors -= status", "def count_neighbour_mines(self, x, y):\n neighbour_mines = 0\n for _x in range(x - 1, x + 2):\n for _y in range(y - 1, y + 2):\n if is_valid(_x, _y):\n if is_mine(self.board, _x, _y):\n neighbour_mines += 1\n return neighbour_mines", "def count_neighbour_mines(self, x, y):\n neighbour_mines = 0\n for _x in range(x - 1, x + 2):\n for _y in range(y - 1, y + 2):\n if is_valid(_x, _y):\n if is_mine(self.board, _x, _y):\n neighbour_mines += 1\n return neighbour_mines", "def fill_count(nid):\n n_edges = G.subgraph(G.neighbors(nid)).number_of_edges()\n deg = G.degree[nid]\n n_fill = deg*(deg-1)//2 - n_edges\n return n_fill", "def numNeighbors(minesSet, row_index, cols_index, num_cols, num_rows):\n mines = 0\n for j in np.arange(max(0, cols_index-1), min(num_cols-1, cols_index+1)+1):\n for i in np.arange(max(0, row_index-1), min(num_rows-1, row_index+1)+1):\n if ((i, j) in minesSet):\n mines+=1\n return mines", "def find_path(self, num_visited, read):\n\n self.path.append(read)\n r = self.reads[read]\n r.visited += 1\n if num_visited < self.num_reads:\n finished = False\n for other_read in r.overlaps:\n if not finished and self.reads[other_read].visited < self.reads[other_read].visit_limit:\n finished = self.find_path(num_visited + 1, other_read)\n if not finished:\n self.path.pop()\n r.visited -= 1\n else:\n finished = True\n return finished", "def longOpenConditions(self,lastIndex):\n return 0", "def test_extreme_neighborhoods(self):\n\n ## Radius = 0 ==> all points are noise\n m = tc.dbscan.create(\n self.sf,\n 
distance=\"euclidean\",\n radius=0.0,\n min_core_neighbors=3,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 0)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"noise\"), self.n)\n\n ## Min_neighbors > 30 ==> all points are noise\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=0.0,\n min_core_neighbors=31,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 0)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"noise\"), self.n)\n\n ## Radius very large ==> all points are core points\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=100.0,\n min_core_neighbors=3,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 1)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"core\"), self.n)\n\n ## Min_neighbors = 0 ==> all points are core points\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=0.5,\n min_core_neighbors=0,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 1)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"core\"), self.n)", "def test_adjacent_bomb_count_2(self):\n index = 9\n adj_list = utils.adjacent_bomb_count(index)\n adj_list_2 = [\n index + x\n for x in utils.RIGHT_ADJ_LIST\n if 0 <= index + x <= (utils.TILE_COUNT - 1)\n ]\n self.assertEqual(adj_list, adj_list_2)", "def numIslands3(self, grid: List[List[str]]) -> int:\n m = len(grid)\n if m > 0:\n n = len(grid[0])\n else:\n return 0\n\n def dfs(grid, i, j):\n if grid[i][j] != '0':\n grid[i][j] = '0'\n\n for direction in self.directions(grid, i, j):\n dfs(grid, direction[0], direction[1])\n\n island = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1':\n island += 1 # count the number of CCs\n dfs(grid, i, j)\n return island", "def obstacle_count(self):\n for x in range(6):\n # do a scan of the area in front of the robot\n self.scan()\n\n \n see_an_object = False\n count = 0 \n # Do a scan and count the amount of objects in the way\n for angle in self.scan_data:\n dist = self.scan_data[angle]\n if dist < self.SAFE_DISTANCE and not see_an_object: \n see_an_object = True\n count += 1\n print(\"~~~ I SEE SOMETHING!! ~~~\")\n elif dist > self.SAFE_DISTANCE and see_an_object:\n see_an_object = False\n print(\"I guess the object ended\") \n print(\"ANGLE: %d | DIST: %d\" % (angle, dist))\n self.turn_by_deg(90)\n print(\"\\nI saw %d objects\" % count)", "def main():\n row, col, island = make_matrix()\n print(count_island(row, col, island))", "def iter_node(self,i):\n nd = self.nodes[i]\n for kn in nd.get_close():\n # for kn in nd.get_known():\n # for kn in nd.neighbours:\n kn_node = self.nodes[kn.lindex]\n nd.add_known_nodes(kn.path_len,kn_node.get_close())", "def flag(self, i, j):\n # Does not allow starting a game with a flag\n if not self.is_game_over and self.is_initialized:\n if not self.revealed[i, j]:\n self.flags[i, j] = not self.flags[i, j]\n self.flags_pts.set_data(*np.where(self.flags)[::-1])\n self.title_txt.set_text('{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))\n self.refresh_canvas()" ]
[ "0.6551041", "0.6264699", "0.6198644", "0.6008779", "0.59324574", "0.58290005", "0.58284974", "0.58284974", "0.58235085", "0.5753008", "0.57382154", "0.5685492", "0.56439877", "0.5639313", "0.56212676", "0.5600274", "0.55936295", "0.55705774", "0.55526507", "0.5544796", "0.55295527", "0.5505192", "0.54728234", "0.5460384", "0.5455767", "0.5454988", "0.54543024", "0.5449874", "0.5410083", "0.5405375", "0.53893083", "0.5347647", "0.5346561", "0.53419036", "0.53266704", "0.5321156", "0.5317379", "0.5310679", "0.53084093", "0.53045815", "0.52951986", "0.5293396", "0.527921", "0.52779686", "0.52639735", "0.52547073", "0.52542496", "0.5247633", "0.52370846", "0.5234958", "0.52244663", "0.52156883", "0.5193584", "0.51753646", "0.5167458", "0.5163064", "0.5157976", "0.5149615", "0.51375455", "0.5133286", "0.51309586", "0.51261485", "0.5122247", "0.5115626", "0.5100911", "0.50988793", "0.5097927", "0.5089112", "0.50798136", "0.50779194", "0.5069773", "0.5067494", "0.50667274", "0.5065347", "0.50633746", "0.50620323", "0.5057568", "0.5056596", "0.503264", "0.5032612", "0.5032498", "0.50267345", "0.50069463", "0.49923372", "0.49852178", "0.49788773", "0.49732047", "0.49712425", "0.49712425", "0.49688745", "0.49681014", "0.49505237", "0.49476823", "0.49399155", "0.49281025", "0.49237415", "0.49224746", "0.49222025", "0.49150488", "0.49146175" ]
0.65664035
0
Open neighbours if the current coordinates are 0 and the neighbours are untouched. Recursively opens neighbours that are also 0.
def open_neighbours(self, y, x):
    if [y, x] in self.mine_locations:
        return [y, x]
    # generate neighbours with positive indexes
    l = [[ye, xe] for xe in range(
        x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]
    for ye, xe in l:
        # if the indexes are out of the game table, skip
        if xe >= self.x or ye >= self.y:
            continue
        # if the current coordinates are still untouched, update their values
        if self.table_state[ye][xe] == '-':
            self.table_state[ye][xe] = self.final_table[ye][xe]
            # if the coordinate has a value of 0, recursively open its neighbours.
            if self.final_table[ye][xe] == '0':
                self.open_neighbours(ye, xe)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_neighbour_cells(self, my_board, x, y):\n for _x in range(x-1, x+2):\n for _y in range(y-1, y+2):\n if is_valid(_x, _y):\n if is_new_move(my_board, _x, _y):\n my_board[_x, _y] = self.count_neighbour_mines(_x, _y)\n if my_board[_x, _y] == 0:\n my_board = self.open_neighbour_cells(my_board, _x, _y)\n return my_board", "def open_neighbour_cells(self, my_board, x, y):\n for _x in range(x-1, x+2):\n for _y in range(y-1, y+2):\n if is_valid(_x, _y):\n if is_new_move(my_board, _x, _y):\n my_board[_x, _y] = self.count_neighbour_mines(_x, _y)\n if my_board[_x, _y] == 0:\n my_board = self.open_neighbour_cells(my_board, _x, _y)\n return my_board", "def special_open_neighbours(self, y, x):\n if self.table_state[y][x] != \"-\" and self.table_state[y][x] == self.flags_nearby(y, x):\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n if xe >= self.x or ye >= self.y: # do not open out of bounds\n continue\n # if it is a bomb but not flagged\n if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG:\n self.show_answer_board([ye, xe])\n print \"KABOOM!\"\n return Minesweeper.IS_A_BOMB\n self.open_neighbours(y, x)\n self.print_table(self.table_state)\n return Minesweeper.NOT_A_BOMB", "def open_adjacents(self, row, col, opened_tile): \n # Iterates through neighboring tiles, only opening closed tiles adjacent to a zero tile.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n if (self.valid_tile(i, j) and self.tiles[i][j].category == Tiles.closed):\n self.opened += 1\n self.tiles[i][j] = self.board[i][j]\n opened_tile.append(self.board[i][j])\n\n # Checks for a game winning move while opening adjacent tiles.\n if (self.opened + self.mines) == (self.rows * self.cols):\n self.game_won = True\n\n # If an adjacent tile is zero, recursively opens that tile's adjacent tiles.\n if self.board[i][j].category == Tiles.zero:\n self.open_adjacents(i, j, opened_tile)\n\n return opened_tile", "def _open_zeros(self, display: bool = False) -> None:\n\n if display:\n print('Before \"Open Zeros\":')\n print(repr(self), \"\\n\")\n\n for pos, space in self._lookup.items():\n if space.hint == '0':\n for neighbor in space.neighbors.values():\n if neighbor and self._lookup[neighbor].hint == '?':\n self._open(*neighbor)\n if display:\n print('After \"Open Zeros\":')\n print(repr(self), \"\\n\")", "def neighbors(self):\n \n # find 0 - blank square\n \n x0 = None\n y0 = None\n \n for i in range(4):\n for j in range(4):\n if self.get_tile(i,j) == 0:\n y0 = i\n x0 = j\n\n if x0 == None or y0 == None:\n return []\n \n neighbor_list = []\n \n # move 0 to the right\n if x0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0+1)\n new_position.set_tile(y0,x0+1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'r'\n neighbor_list.append(new_position)\n # move 0 to the left\n if x0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0-1)\n new_position.set_tile(y0,x0-1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'l'\n neighbor_list.append(new_position)\n # move 0 up\n if y0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0-1,x0)\n new_position.set_tile(y0-1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'u'\n neighbor_list.append(new_position)\n # move 0 down\n if y0 < 3:\n new_position = Position(self.tiles)\n temp = 
new_position.get_tile(y0+1,x0)\n new_position.set_tile(y0+1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'd'\n neighbor_list.append(new_position)\n \n return neighbor_list", "def check_neighbours(matrix, cur_pos, visited):\n visited[cur_pos[0]][cur_pos[1]] = True\n\n for i in range(num_of_neighbours):\n cur_neighbour = (cur_pos[0]+neighbours_positions[i][0], cur_pos[1]+neighbours_positions[i][1])\n if is_safe(matrix, cur_neighbour, visited):\n check_neighbours(matrix, cur_neighbour, visited)", "def update_neighbors(self):\n neighbors = []\n for i in range(-1, 2):\n for j in range(-1, 2):\n if (i, j) == (0, 0):\n continue\n try:\n y, x = self.loc[0]+i, self.loc[1]+j\n neighbor = self.board.array[y, x]\n if neighbor > 0:\n neighbors.append(neighbor)\n except:\n continue\n \n self.neighbors = neighbors", "def _dfs(grid, i, j):\n grid[i][j] = False\n for x in range(i - 1, i + 2):\n for y in range(j - 1, j + 2):\n if (abs((x + y) - (i + j)) == 1) and _is_valid_land(x, y, grid):\n _dfs(grid, x, y)", "def check_neighbours(self):\n for p in self.targetCell.possibilities:\n if p != 0:\n if p not in self.targetCell.row_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.column_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.box_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n return False", "def island():\n\n grid = [\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"1\", \"1\"]\n ]\n\n def dfs():\n rows = len(grid)\n cols = len(grid[0])\n count = 0\n for i in range(0, rows):\n for j in range(0, cols):\n if grid[i][j] == '1':\n check_valid(i, j, grid)\n count = count + 1\n return count\n\n def check_valid(i, j, grid=None):\n rows = len(grid)\n cols = len(grid[0])\n\n if not 0 <= i < rows or not 0 <= j < cols or grid[i][j] != '1':\n return\n\n grid[i][j] = '0'\n\n check_valid(i + 1, j, grid)\n check_valid(i - 1, j, grid)\n check_valid(i, j + 1, grid)\n check_valid(i, j - 1, grid)\n\n return dfs()", "def draw_open_cells(self):\n empty_cells = [cell for cell in self.game.get_cells() if cell.player == 0]\n nx.draw_networkx_nodes(self.G, pos=self.positions, nodelist=empty_cells,\n edgecolors='black', node_color='white', linewidths=2)", "def dfs(cell):\n r, c = cell\n if (0 <= r < len(grid)) and (0 <= c < len(grid[0])) and (cell not in visited) and (grid[r][c] != 0):\n\n visited.add((r, c)) # save cell\n grid[r][c] = self.num_islands\n # update current island size\n dfs((r, c+1))\n dfs((r+1, c))\n dfs((r-1, c))\n dfs((r, c-1))\n\n else:\n # out of bounds or visited\n return", "def check_neighbours(self, grid):\n if self.bomba:\n self.bombs_around = -1\n return\n\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n\n if neighbor.bomba:\n total += 1\n \n self.bombs_around = total", "def neighbors(self, x):\n pass", "def open(self, i, j):\n if not self.isOpen(i, j):\n # set open to true\n self.arr_open[self._index(i, j)] = True\n # connect to surrounding sites\n [self.qu.union(self._index(i, j), self._index(x[0], x[1]))\n for x in [(i + 1, j), (i - 1, j), (i, j - 1), (i, j + 1)]\n if self.isOpen(x[0], x[1])]", "def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + 
diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]", "def _check_neighbors(self):\n for direction, dir_info in self.DIRECTIONS.items():\n pos = Point(\n self.position.x + dir_info[\"mask\"][0],\n self.position.y + dir_info[\"mask\"][1]\n )\n status = self.move(direction)\n self.grid[status].add(pos)\n if status in (1, 2):\n # moved\n self.move(dir_info[\"opposite\"])\n yield pos", "def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n root.query_xy(self.maxs[0] + xs, self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours", "def get_neighbors(start_square, visited=[]):\n neighbors = []\n\n # loop over possible x values\n for i in [start_square.x - 1, start_square.x, start_square.x + 1]:\n\n # drop neighbors outside of our region of interest\n if i < 0 or i > MAX_X:\n continue\n\n # loop over possible y values\n for j in [start_square.y - 1, start_square.y, start_square.y + 1]:\n\n # drop neighbors outside of our region of interest\n if j < 0 or j > MAX_Y:\n continue\n\n # Ignore ourself\n if i == start_square.x and j == start_square.y:\n continue\n\n # Ignore corner pieces\n if i == start_square.x - 1 and j != start_square.y:\n continue\n if i == start_square.x + 1 and j != start_square.y:\n continue\n\n # Deal with barriers\n found = False\n for square in visited:\n if square.pos == [i, j]:\n found = True\n break\n if found:\n continue\n\n neighbors.append(Square(i, j))\n\n return neighbors", "def fn(i, j, empty):\n nonlocal ans \n if grid[i][j] == 2: \n if empty == -1: ans += 1\n return \n grid[i][j] = -1 # mark as visited \n for ii, jj in (i-1, j), (i, j-1), (i, j+1), (i+1, j): \n if 0 <= ii < m and 0 <= jj < n and grid[ii][jj] != -1: \n fn(ii, jj, empty-1)\n grid[i][j] = 0 # backtracking", "def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. 
Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)", "def graph_search(problem, open_nodes):\n explored = [problem.initial]\n open_nodes.append(Node(problem.initial))\n while len(open_nodes) > 0:\n node = open_nodes.pop()\n if problem.goal_test(node.state):\n #print \"Path cost: %d\" % node.path_cost\n print 'Broj poteza: ' + str(len(node.solution())-1)\n return node.solution()\n for child in node.expand(problem):\n if child.state not in explored:\n open_nodes.append(child)\n explored.append(child.state)\n return None", "def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n n.append(c)\n return n", "def neighbors(current_node, maze):\n UP, DOWN, LEFT, RIGHT = -1, 1, -1, 1\n neighbors = []\n pos = [(0, UP), (0, DOWN), (LEFT, 0), (RIGHT, 0)]\n diag = [(LEFT, UP), (RIGHT, DOWN), (LEFT, DOWN), (RIGHT, UP)]\n if not args.disable_diagonal:\n pos += diag\n for new_position in pos:\n node_position = (\n current_node.position[0] + new_position[0],\n current_node.position[1] + new_position[1],\n ) \n # range check\n if (\n node_position[0] > (len(maze) - 1)\n or node_position[0] < 0\n or node_position[1] > (len(maze[node_position[0]]) - 1)\n or node_position[1] < 0\n ):\n continue\n # wall check\n if new_position in diag:\n if (\n maze[current_node.position[0]][current_node.position[1] + new_position[1]] == 0 \n and maze[current_node.position[0] + new_position[0]][current_node.position[1]] == 0\n ):\n continue\n if maze[node_position[0]][node_position[1]] == 0:\n continue\n new_node = Node(node_position)\n # g is how the cost of the step\n if new_position[0] != 0 and new_position[1] != 0:\n new_node.g = current_node.g + 1.44\n else:\n new_node.g = current_node.g + 1\n new_node.parent = current_node\n neighbors.append(new_node)\n return neighbors", "def neighbors(self, cell):\n x = cell.x\n y = cell.y\n for new_x, new_y in [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y)]:\n neighbor = self[new_x, new_y]\n if neighbor is not None:\n yield neighbor", "def iter_neighbors(x: int, y: int) -> t.Generator[COORDINATE, None, None]:\n yield x - 1, y\n yield x + 1, y\n yield x, y - 1\n yield x, y + 1", "def search(self):\n open_set = set()\n closed_set = set()\n open_set.add(self.start_node)\n\n # loop through all nodes until open set is empty to build neighbor map\n while open_set:\n current_node = open_set.pop()\n closed_set.add(current_node)\n for removed_cells, score, next_status in current_node.find_next_moves():\n open_status_set = [i.status for i in open_set]\n closed_status_set = [i.status for i in closed_set]\n if next_status in open_status_set:\n index = open_status_set.index(next_status)\n node = list(open_set)[index]\n elif next_status in closed_status_set:\n index = closed_status_set.index(next_status)\n node = list(closed_set)[index]\n else:\n node = PopstarsNode(next_status)\n open_set.add(node)\n node.parents.append(current_node)\n current_node.children[node].append(\n (score, removed_cells, True))\n 
current_node.update_parents()\n max_score = []\n for i in self.start_node.children:\n max_score += self.start_node.children[i]\n return max(max_score)[0]", "def get_unvisited_neighbours(self, grid):\n\t\tfor neighbour in self.get_neighbours(grid):\n\t\t\tif not neighbour.visited:\n\t\t\t\tyield neighbour", "def Explore(self,x,y):\n if [x,y]==self.MazeKey and self.State:\n self.State=False\n return self.Path\n\n if [x,y]==self.CurrentCell and not self.State:\n return self.Path\n \n if not self.North[x][y] and [x,y+1] not in self.Path: #If false and not in the explore path so far.\n self.track.push([x,y],self.FootPrints)\n self.track.push([x,y+1],self.Path)\n return self.Explore(x,y+1)\n \n elif not self.East[x][y] and [x+1,y] not in self.Path:\n self.track.push([x,y],self.FootPrints)\n self.track.push([x+1,y],self.Path)\n return self.Explore(x+1,y)\n \n elif not self.South[x][y] and [x,y-1] not in self.Path:\n self.track.push([x,y],self.FootPrints)\n self.track.push([x,y-1],self.Path)\n return self.Explore(x,y-1)\n\n elif not self.West[x][y] and [x-1,y] not in self.Path:\n self.track.push([x,y],self.FootPrints)\n self.track.push([x-1,y],self.Path)\n return self.Explore(x-1,y)\n else:\n prev=self.track.pop(self.FootPrints)\n x=prev[0]\n y=prev[1]\n self.track.push(prev,self.Path)\n return self.Explore(x,y)", "def find_los_neighbors(seats, occupied_self, i, j):\n values = []\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n values.append(\n find_nearest_los_seat(seats, occupied_seats, i, j, dx, dy)\n )\n return values", "def reachable(maze: list, start: tuple, goal: tuple):\n n = len(maze) # Get the dimension of the maze\n\n #========================================#\n # Some data checking statements\n\n if (not is_valid(start, n)):\n print(\"reachable: Start indices outside maze dimensions\")\n return False\n elif (not is_valid(goal, n)):\n print(\"reachable: Goal indices outside maze dimensions\")\n return False\n\n # End data checking statements\n #========================================#\n\n # We can use a copy of the maze to keep track of visited squares (Considered using a set here, thought that time efficiency was important)\n visited = copy.deepcopy(maze)\n # visited = list(map(list, maze)) # Alternative to using copy.deepcopy\n stack = [] # Define our stack of \"fringe\" squares\n stack.append(start) # Push the start square onto our stack\n visited[start[0]][start[1]] = 1 # Set our start to visited\n\n while (len(stack)): # While there exists items in the stack\n current = stack.pop() # Pop the last element\n\n if (current == goal):\n return True # If current is the goal, we found it!\n\n current_i, current_j = current # Unpack the current pair\n\n # Now we want to add all unvisited squares that are possible to get to from the current square\n for i in range(len(nearby_offsets)):\n offset_i, offset_j = nearby_offsets[i]\n possible = (current_i + offset_i, current_j + offset_j)\n # print(f\"Current possible: {possible_i} {possible_j}\") # DEBUG\n if (is_valid(possible, n)): # If the calculated square is within the maze matrix\n if (not visited[possible[0]][possible[1]]):\n stack.append(possible)\n visited[possible[0]][possible[1]] = 1\n return False # If the while loop goes out, and the stack is empty, then there is no possible path", "def get_neighbours(self, coords):\n\n\t dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n\t (-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n\t (-1,2),(0,2),(1,2),(0,0)]\n\t neighbours = 
[]\n\t for dx, dy in dxdy:\n\t neighbour_coords = coords[0] + dx, coords[1] + dy\n\t if not (0 <= neighbour_coords[0] < self.nx and\n\t 0 <= neighbour_coords[1] < self.ny):\n\t # We're off the grid: no neighbours here.\n\t continue\n\t neighbour_cell = self.cells[neighbour_coords]\n\t if neighbour_cell is not None:\n\t # This cell is occupied: store this index of the contained point.\n\t neighbours.append(neighbour_cell)\n\t return neighbours", "def graph_search(problem, open_nodes):\n explored = [problem.initial]\n open_nodes.append(Node(problem.initial))\n while len(open_nodes) > 0:\n node = open_nodes.pop()\n if problem.goal_test(node.state):\n #print \"Path cost: %d\" % node.path_cost\n return node.solution()\n for child in node.expand(problem):\n if child.state not in explored:\n open_nodes.append(child)\n explored.append(child.state)\n return None", "def add_to_open(open, neighbour):\n for node in open:\n if neighbour == node and neighbour.f >= node.f:\n # Will not add if there already exists the same node in open that has lower f value\n return False\n\n return True", "def Breakwalls(self):\n \n \n if len(self.VisitedCoord)==self.TotalCells: #Base case for the recursive call.\n \n return self.VisitedCoord #When base case is hit, returns the list of all the visited cells. [[x,y],[x,y],[x,y],[x,y]]\n xval=self.CurrentCell[0] #Breaks Current Cell up, xval is the x value \n yval=self.CurrentCell[1] #yval is the y value\n \n \n if (yval+1==self.N+1 or [xval,yval+1] in self.VisitedCoord) and (yval-1==0 or [xval,yval-1] in self.VisitedCoord) \\\n and (xval+1==self.N+1 or [xval+1,yval] in self.VisitedCoord) and (xval-1==0 or [xval-1,yval] in self.VisitedCoord): #If the Cell is surrounded\n #and can't move \n self.CurrentCell=self.track.pop(self.CellStack) #Pop the last coord from the cell stack and make that current cell.\n #print(\"Current: \", self.CurrentCell)\n return self.Breakwalls() #Recursive call to Breakwalls \n \n self.track.push(self.CurrentCell,self.CellStack) #If cell not surrounded push the current cell onto the cellstack and begin looking for a neighbour \n while True: #Remember Cell stack is where you out your foot down.\n Directions=[\"North\",\"South\",\"East\",\"West\"]\n randir=randrange(0,len(Directions))\n dir=Directions[randir] #Choose a random direction \n #print(dir,yval+1,self.CurrentCell,self.VisitedCoord)\n \n if dir== \"North\" and yval+1<self.N+1 and [xval,yval+1] not in self.VisitedCoord: #if direction and not out of bounds. 
Self.N+ is the border.\n self.North[xval][yval]=self.South[xval][yval+1] = False #if less than that, you are within the border \n yval+=1;break \n elif dir ==\"South\" and yval-1>0 and [xval,yval-1] not in self.VisitedCoord: #in the southern part, 0 is the border.if >0, within actual maze.\n self.South[xval][yval]=self.North[xval][yval-1] = False \n yval-=1;break \n elif dir ==\"East\" and xval+1 <self.N+1 and [xval+1,yval] not in self.VisitedCoord:\n self.East[xval][yval]=self.West[xval+1][yval] = False\n xval+=1;break \n elif dir ==\"West\" and xval-1 > 0 and [xval-1,yval] not in self.VisitedCoord:\n self.West[xval][yval]=self.East[xval-1][yval] =False\n xval-=1;break\n\n #Above chooses a random direction and if condition checks out, breaks the wall by setting it to false and increments/decrements the respective value\n #to reflect N/S/E/W.\n self.CurrentCell=[xval,yval] #xval/yval was incremented so the new value remains, all thats left is to make the current cell that new coord.\n \n self.track.push(self.CurrentCell,self.VisitedCoord) #The new current cell is now pushed onto the visited coordinates stack \n \n return self.Breakwalls() ##Recursive call on the current cell. Everything happens again on that new coordinate. ", "def Find_Path(self):\n closed_nodes_map = [] # map of closed (tried-out) nodes\n open_nodes_map = [] # map of open (not-yet-tried) nodes\n dir_map = [] # map of directions\n row = [0] * self.n\n for i in range(self.m): # create 2d arrays\n closed_nodes_map.append(list(row))\n open_nodes_map.append(list(row))\n dir_map.append(list(row))\n \n pq = [[], []] # priority queues of open (not-yet-tried) nodes\n pqi = 0 # priority queue index\n # create the start node and push into list of open nodes\n n0 = node(self.xStart, self.yStart, 0.0, 0.0)\n n0.updatePriority(self.xFinish, self.yFinish)\n heappush(pq[pqi], n0)\n open_nodes_map[self.yStart][self.xStart] = n0.priority # mark it on the open nodes map\n \n # A* search\n while len(pq[pqi]) > 0:\n # get the current node w/ the highest priority\n # from the list of open nodes\n n1 = pq[pqi][0] # top node\n n0 = node(n1.xPos, n1.yPos, n1.distance, n1.priority)\n x = n0.xPos\n y = n0.yPos\n heappop(pq[pqi]) # remove the node from the open list\n open_nodes_map[y][x] = 0\n # mark it on the closed nodes map\n closed_nodes_map[y][x] = 1\n \n # quit searching when the goal state is reached\n if x == self.xFinish and y == self.yFinish:\n # Generate the path from finish to start by following the \n # directions.\n return self.Reconstruct_Path(dir_map)\n \n # generate moves (child nodes) in all possible directions\n for i in range(self.num_directions):\n new_x = x + self.dx[i]\n new_y = y + self.dy[i]\n Flag=True\n if not (new_x < 0 or new_x > self.n-1 or new_y < 0 or new_y > self.m - 1\n or self.MAP[new_y][new_x] == 1 or closed_nodes_map[new_y][new_x] == 1):\n # Check to see if the extended path runs through any obstacles\n if (abs(self.dx[i])>1 or abs(self.dy[i])>1):\n # Need to check that the path does not pass an object\n JumpCells=2*max(abs(self.dx[i]),abs(self.dy[i]))-1\n for K in range(1,JumpCells):\n YPOS=int(round(K*1.0*self.dy[i]/JumpCells))\n XPOS=int(round(K*1.0*self.dx[i]/JumpCells))\n if (self.MAP[y+YPOS][x+XPOS]==1):\n Flag=False\n if Flag: \n # generate a child node\n m0 = node(new_x, new_y, n0.distance, n0.priority)\n m0.calc_cost(self.dx[i], self.dy[i])\n m0.updatePriority(self.xFinish, self.yFinish)\n # if it is not in the open list then add into that\n if open_nodes_map[new_y][new_x] == 0:\n 
open_nodes_map[new_y][new_x] = m0.priority\n heappush(pq[pqi], m0)\n # mark its parent node direction\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n elif open_nodes_map[new_y][new_x] > m0.priority:\n # update the priority info\n open_nodes_map[new_y][new_x] = m0.priority\n # update the parent direction info\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n # replace the node\n # by emptying one pq to the other one\n # except the node to be replaced will be ignored\n # and the new node will be pushed in instead\n while not (pq[pqi][0].xPos == new_x and pq[pqi][0].yPos == new_y):\n heappush(pq[1 - pqi], pq[pqi][0])\n heappop(pq[pqi])\n heappop(pq[pqi]) # remove the wanted node\n # empty the larger size pq to the smaller one\n if len(pq[pqi]) > len(pq[1 - pqi]):\n pqi = 1 - pqi\n while len(pq[pqi]) > 0:\n heappush(pq[1-pqi], pq[pqi][0])\n heappop(pq[pqi]) \n pqi = 1 - pqi\n heappush(pq[pqi], m0) # add the better node instead\n return '','' # no route found", "def checkNeighbours(data):\n features = 0\n background = 0\n neighbours = [data[0,0],data[0,1],data[0,2],data[1,2],data[2,2],data[2,1],data[2,0],data[1,0]]\n fourConnected = False\n lastPoint = neighbours[-1] #Needed for checking a complete transition cycle\n for n in neighbours:\n if not n:\n features += 1\n elif fourConnected:\n background += 1\n\n fourConnected = not fourConnected\n lastPoint = n\n\n for pos,corner in enumerate(corners):\n if numpy.alltrue(data == corner):\n cornerPos = pos+1\n break\n else:\n cornerPos = 0\n return (features,background,cornerPos)", "def search(self) -> int:\n # crete node list\n for x in range(self.n):\n for y in range(self.n):\n if not self.grid[y][x] == 0:\n self.all_nodes.append((x, y))\n # recursively create paths\n i = 0\n paths = [[(0, 0)]]\n while i < self.n * self.n:\n paths = self.generate_paths(paths)\n if isinstance(paths, int):\n return paths\n i += 1\n\n return -1", "def findImmediateNeighbours(self):\n immediateNeighbours = []\n\n if self.xCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate - 1, self.yCoordinate))\n\n if self.xCoordinate + 1 < 395:\n immediateNeighbours.append(PixelPosition(self.xCoordinate + 1, self.yCoordinate))\n\n if self.yCoordinate + 1 < 500:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate + 1))\n\n if self.yCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate - 1))\n\n return immediateNeighbours", "def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])", "def neighbors((min_i, min_j), (max_i, max_j), (i, j)):\n if j + 1 <= max_j:\n yield (i, j + 1)\n if j - 1 >= min_j:\n yield (i, j - 1)\n if i + 1 <= max_i:\n yield (i + 1, j)\n if i - 1 >= min_i:\n yield (i - 1, j)", "def count_paths((min_i, min_j), (max_i, max_j)):\n\n def explore((i, j), path):\n found = 0\n for (x, y) in neighbors((min_i, min_j), (max_i, max_j), (i, j)):\n if (x, y) == (max_i, max_j):\n found += 1\n debug(\"neighbor %r of node %r on path %r is a goal node: +1\" % ((x, y), (i, j), path))\n elif (x, y) in path: \n debug(\"neighbor %r of node %r already on path %r; ignoring...\" % ((x, y), (i, j), path))\n continue\n else:\n debug(\"neighbor %r of node %r not already on path 
%r; exploring ...\" % ((x, y), (i, j), path))\n found += explore((x, y), mkpath(path, (x, y)))\n return found\n return explore((0, 0), set([(0, 0)]))", "def neighbours(self, i, j):\n nearest = []\n for x_offset, y_offset in [(0, -1), (0, 1), (1, 0), (-1, 0)]:\n try:\n nearest.append(self.as_list[checkNonNegIndex(i + x_offset)][checkNonNegIndex(j + y_offset)])\n except IndexError:\n continue\n except TypeError:\n continue\n return nearest", "def depth_first_search(self, y_coordinate, x_coordinate, surface):\n current_y = y_coordinate\n current_x = x_coordinate\n while not self.__solved:\n # If not in stack, add to stack and visualize\n if [current_y, current_x] not in self.__stack:\n self.__add_to_stack_and_visualize(current_y, current_x, surface)\n # Gets current cell\n cell = self.__board[current_y, current_x]\n\n # If no wall in given direction and not solved\n if not cell.get_wall(Direction.NORTH) and not self.__solved:\n # If next one is the solution, then break and finished\n if current_y - 1 < 0:\n self.__solved = True\n break\n # If next cell is not part of the path - checks for next cell\n next_cell = self.__board[current_y - 1, current_x]\n if next_cell.get_used() == Path.NO:\n current_y = current_y - 1\n current_x = current_x\n continue\n\n if not cell.get_wall(Direction.EAST) and not self.__solved:\n if current_x + 1 > self.__board.shape[1] - 1:\n self.__solved = True\n break\n next_cell = self.__board[current_y, current_x + 1]\n if next_cell.get_used() == Path.NO:\n current_y = current_y\n current_x = current_x + 1\n continue\n\n if not cell.get_wall(Direction.SOUTH) and not self.__solved:\n if current_y + 1 > self.__board.shape[0] - 1:\n self.__solved = True\n break\n next_cell = self.__board[current_y + 1, current_x]\n if next_cell.get_used() == Path.NO:\n current_y = current_y + 1\n current_x = current_x\n continue\n\n if not cell.get_wall(Direction.WEST) and not self.__solved:\n if current_x - 1 < 0:\n self.__solved = True\n break\n next_cell = self.__board[current_y, current_x - 1]\n if next_cell.get_used() == Path.NO:\n current_y = current_y\n current_x = current_x - 1\n continue\n\n # If no there are no more possibilities for cell, pop of stack and visualize\n if not self.__solved:\n self.__pop_off_stack_and_visualize(current_y, current_x, surface)\n current_y = self.__stack[-1][0]\n current_x = self.__stack[-1][1]", "def open(self, row, col):\n self._validate_indexes(row, col)\n self._grid[row][col] = True\n site_idx = row * self._n + col\n # connect to left site\n if col > 0 and self.is_open(row, col - 1):\n self._uf.union(site_idx, site_idx - 1)\n # connect to right site\n if col < self._n - 1 and self.is_open(row, col + 1):\n self._uf.union(site_idx, site_idx + 1)\n # connect to upper site\n if row > 0 and self.is_open(row - 1, col):\n self._uf.union(site_idx, (row - 1) * self._n + col)\n # connect to lower site\n if row < self._n - 1 and self.is_open(row + 1, col):\n self._uf.union(site_idx, (row + 1) * self._n + col)", "def test_get_neighbours(self):\n data = [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]\n board = Board(data)\n\n # [pos(x, y), #neighbours]\n posx_posy_n = [[0, 0, 2], [0, 1, 3], [0, 2, 2],\n [1, 0, 3], [1, 1, 4], [1, 2, 3],\n [2, 0, 2], [2, 1, 3], [2, 2, 2]]\n for x, y, n in posx_posy_n:\n neighbours = [i for i in board.get_neighbours(x, y)]\n self.assertEquals(len(neighbours), n)", "def get_open_cells(field: MineField, cell: Cell) -> List[Cell]:\n # TODO re-implement iteratively to safe stack space (especially for large boards)\n if cell.is_flag or 
cell.is_mine or not field.cell_is_safe(cell):\n return [cell]\n \n if cell.visited:\n return\n\n open_cells: list = list()\n cell.visited = True\n open_cells.append(cell)\n\n for cell in field.surrounding_cells(cell):\n open_cells.append(cell)\n if cell.is_flag or cell.visited or not field.cell_is_safe(cell):\n continue\n\n\n open_cells += get_open_cells(field, cell)\n\n return open_cells", "def _check_satisfied_neighborhood(\n recursive_counter: int, stop_recursive: int, matrix_size: int\n) -> bool:\n return recursive_counter >= stop_recursive * (matrix_size ** 2)", "def open_spots(self):\n ret = []\n for i in range(1,25):\n if self.nodes[i].piece == None:\n ret.append(i)\n return ret", "def neighboring_cells(self, cell_id, include_self=False):\n\t\tx, y, t = self._xyt_from_cell_id(cell_id)\n\n\t\tncells = bhpix.neighbors(x, y, self.level, include_self)\n\t\tfor (cx, cy) in ncells:\n\t\t\tif fabs(fabs(cx) - fabs(cy)) > 0.5:\n\t\t\t\tprint \"PROBLEM: \", x, y, cx, cy\n\t\t\t\tprint ncells\n\n\t\tnhood = [ self._cell_id_for_xyt(x, y, t) for (x, y) in ncells ]\n\n\t\t# TODO: Remove once we're confident it works\n\t\trrr = set([ self._xyt_from_cell_id(cid)[:2] for cid in nhood ])\n\t\tassert rrr == ncells\n\t\tassert cell_id not in rrr\n\n\t\treturn nhood", "def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if rooms == []:\n return\n \n row = len(rooms)\n column = len(rooms[0])\n visited = [[False for i in range(column)] for j in range(row)]\n def valid(row_index, column_index):\n if row_index < row and row_index >= 0 and column_index< column and column_index >= 0:\n return True\n return False\n \n \n def bfs_traverse(row_index, column_index, distance):\n if valid(row_index, column_index) == False or rooms[row_index][column_index] < distance:\n return\n else:\n # if rooms[row_index] [column_index] != -1 and rooms[row_index] [column_index] != 0:\n if distance < rooms[row_index][column_index]:\n\n rooms[row_index][column_index] = distance\n if rooms[row_index] [column_index] != -1:\n if valid(row_index+1, column_index):\n bfs_traverse(row_index+1, column_index, distance+1)\n if valid(row_index, column_index+1):\n bfs_traverse(row_index, column_index +1 , distance+1)\n if valid(row_index-1, column_index):\n bfs_traverse(row_index-1, column_index, distance+1)\n if valid(row_index, column_index-1):\n bfs_traverse(row_index, column_index-1, distance+1)\n \n \n for row_index in range(row):\n for column_index in range(column):\n if rooms[row_index][column_index] == 0:\n bfs_traverse(row_index, column_index, 0)", "def get_neighbouring_nodes(node) :\r\n\r\n connected_nodes = [] #A list of the connected nodes\r\n\r\n #Checking if the node belongs to the 1st row\r\n if(node.coords[0] != 0) :\r\n connected_node = Node((node.coords[0] - 1, node.coords[1]), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the last row\r\n if(node.coords[0] != grid_dims[0] - 1) :\r\n connected_node = Node((node.coords[0] + 1, node.coords[1]), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the 1st column\r\n if(node.coords[1] != 0) :\r\n connected_node = Node((node.coords[0], node.coords[1] - 1), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in 
obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the 1st column\r\n if(node.coords[1] != grid_dims[1] - 1) :\r\n connected_node = Node((node.coords[0], node.coords[1] + 1), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n return connected_nodes", "def test_get_neighbours(self):\n self.assertEqual(self.game.get_neighbours(2,2), [[1, 1], [1, 2], [1, 3], \n [2, 1], [2, 3], [3, 1], [3, 2], [3, 3]])\n self.assertEqual(self.game.get_neighbours(0,0), [[0, 1], [1, 0], [1, 1]])\n self.assertEqual(self.game.get_neighbours(44,0), [[43, 0], [43, 1], [44, 1]])\n self.assertEqual(self.game.get_neighbours(45,0), [])\n self.assertEqual(self.game.get_neighbours(44,89), [[43, 88], [43, 89], [44, 88]])", "def get_neighbours(x, y, board):\n return [get_left(x, y, board), get_upper(x, y, board), get_right(x, y, board), get_lower(x, y, board)]", "def wallsAndGates(self, rooms: List[List[int]]) -> None:\n \n #traverse room, find 0, and start doing bfs\n #update with min distance\n \n if not rooms:\n return\n \n row = len(rooms)\n col = len(rooms[0])\n queue = []\n def bfs():\n \n directions = [[0,1], [1,0], [-1,0], [0,-1]]\n \n while queue:\n i, j = queue.pop()\n #search neighbor\n for d in directions:\n new_i = i+d[0]\n new_j = j+d[1]\n if 0<=new_i<row and 0<=new_j<col and rooms[new_i][new_j]!= -1:\n #update\n if rooms[i][j]+1 < rooms[new_i][new_j]:\n rooms[new_i][new_j] = rooms[i][j]+1\n queue.append([new_i,new_j])\n\n for i in range(row):\n for j in range(col):\n if rooms[i][j] == 0:\n queue.append([i,j])\n bfs()", "def wallsAndGates(self, rooms: List[List[int]]) -> None:\n # start with the gate and search the empty cells it can reach\n # then we take the smallest one\n if not rooms:\n return\n\n m, n = len(rooms), len(rooms[0])\n \n queue = []\n for i in range(m):\n for j in range(n):\n if rooms[i][j] == 0:\n queue.append((i, j))\n\n\n for x, y in queue:\n dist = rooms[x][y] + 1\n\n for dx, dy in ((-1, 0), (1, 0), (0, 1), (0, -1)):\n new_x, new_y = x+dx, y+dy\n if 0 <= new_x < m and 0 <= new_y < n and rooms[new_x][new_y] == 2147483647:\n rooms[new_x][new_y] = dist\n queue.append((new_x, new_y))", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def flood_island(\n grid: list[list[int]],\n row: int,\n col: int,\n visited: list[list[bool]],\n island_points: Optional[list[tuple[int, int]]] = None) -> int:\n if island_points is None:\n island_points = []\n\n island_points.append((row, col))\n visited[row][col] = True\n\n rows, cols = len(grid), len(grid[0])\n\n for i, j in (-1, 0), (0, -1), (0, 1), (1, 0):\n new_row, new_col = row+i, col+j\n if new_row < 0 or new_row >= rows:\n continue\n if new_col < 0 or new_col >= cols:\n continue\n\n if visited[new_row][new_col]:\n continue\n\n if grid[new_row][new_col] == 1:\n flood_island(grid, new_row, new_col, visited, island_points)\n\n return len(island_points)", "def __neighbors(self, x, y):\n if (x > 0) and not self.is_wall(x - 1, y):\n yield x - 1, y\n if (x < self.width - 1) and not self.is_wall(x + 1, y):\n yield x + 1, y\n if (y > 0) and not self.is_wall(x, y - 1):\n yield x, y - 1\n if (y < self.height 
- 1) and not self.is_wall(x, y + 1):\n yield x, y + 1", "def no_neighbour(x: int, y: int) -> bool:\r\n if not wall_check(x, y-1, False):\r\n if example[x, y-1] == 0:\r\n return False\r\n if not wall_check(x, y+1, False):\r\n if example[x, y+1] == 0:\r\n return False\r\n if not wall_check(x+1, y, False):\r\n if example[x+1, y] == 0:\r\n return False\r\n if not wall_check(x-1, y, False):\r\n if example[x-1, y] == 0:\r\n return False\r\n return True", "def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #Assign each of the neighbours\n # Top-left to the top-right\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # Left to right\n left = (row, col - 1)\n # The '(row, col)' coordinates passed to this\n # function are situated here\n right = (row, col + 1)\n \n # Bottom-left to bottom-right\n bottom_left = (row + 1, col - 1)\n bottom_center = (row + 1, col)\n bottom_right = (row + 1, col + 1)\n \n return [top_left, top_center, top_right,\n left, right,\n bottom_left, bottom_center, bottom_right]", "def explore_all_neighbors(z, i):\n\n n = len(z)\n q = [i]\n found = False\n while len(q) > 0:\n i = q.pop()\n\n for j in range(n):\n\n if z[i][j] == 1: # Reset All Neighbours\n z[i][j] = 0\n z[j][i] = 0\n found = True\n if i != j:\n q.append(j)\n\n return found", "def get_connected_nodes(node, current_path_len) :\r\n\r\n connected_nodes = [] #A list of the connected nodes\r\n closed_list_coords = get_path_coordinates(closed_list)\r\n\r\n #Checking if the node belongs to the 1st row\r\n if(node.coords[0] != 0) :\r\n connected_node = Node((node.coords[0] - 1, node.coords[1]), goal_pos, current_path_len)\r\n #Checking if the node has already been traversed or is it is an obstacle\r\n if(not connected_node.coords in closed_list_coords and not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the last row\r\n if(node.coords[0] != grid_dims[0] - 1) :\r\n connected_node = Node((node.coords[0] + 1, node.coords[1]), goal_pos, current_path_len)\r\n #Checking if the node has already been traversed or is it is an obstacle\r\n if(not connected_node.coords in closed_list_coords and not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the 1st column\r\n if(node.coords[1] != 0) :\r\n connected_node = Node((node.coords[0], node.coords[1] - 1), goal_pos, current_path_len)\r\n #Checking if the node has already been traversed or is it is an obstacle\r\n if(not connected_node.coords in closed_list_coords and not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the 1st column\r\n if(node.coords[1] != grid_dims[1] - 1) :\r\n connected_node = Node((node.coords[0], node.coords[1] + 1), goal_pos, current_path_len)\r\n #Checking if the node has already been traversed or is it is an obstacle\r\n if(not connected_node.coords in closed_list_coords and not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n return connected_nodes", "def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if rooms == []:\n return\n \n row = len(rooms)\n column = len(rooms[0])\n visited = [[False for i in range(column)] for j in range(row)]\n def valid(row_index, column_index):\n if row_index < row and row_index >= 0 and column_index< column and column_index >= 0:\n return True\n return False\n \n \n 
def bfs_traverse(row_index, column_index, distance):\n if valid(row_index, column_index) == False or rooms[row_index][column_index] < distance:\n return\n else:\n # if rooms[row_index] [column_index] != -1 and rooms[row_index] [column_index] != 0:\n\n rooms[row_index][column_index] = distance\n \n bfs_traverse(row_index+1, column_index, distance+1)\n\n bfs_traverse(row_index, column_index +1 , distance+1)\n\n bfs_traverse(row_index-1, column_index, distance+1)\n\n bfs_traverse(row_index, column_index-1, distance+1)\n \n \n for row_index in range(row):\n for column_index in range(column):\n if rooms[row_index][column_index] == 0:\n bfs_traverse(row_index, column_index, 0)", "def test_neighbor():\n UP = (0, -1)\n LEFT = (-1, 0)\n DOWN_RIGHT = (1, 1)\n HERE = (0, 0)\n\n c1 = Cell(2, 6, 100)\n c1_up = c1.neighbor(UP)\n c1_left = c1.neighbor(LEFT)\n c1_down_right = c1.neighbor(DOWN_RIGHT)\n c1_here = c1.neighbor(HERE)\n\n assert c1_up == (2, 5)\n assert c1_up[0] == 2\n assert c1_left == (1, 6)\n assert c1_left[1] == 6\n assert c1_down_right == (3, 7)\n assert c1_here == (2, 6)\n\n c2 = Cell(4, 2, 200)\n c2_up = c2.neighbor(UP)\n c2_left = c2.neighbor(LEFT)\n c2_down_right = c2.neighbor(DOWN_RIGHT)\n c2_here = c2.neighbor(HERE)\n\n assert c2_up == (4, 1)\n assert c2_left == (3, 2)\n assert c2_down_right == (5, 3)\n assert c2_here == (4, 2)", "def _set_node_neighbours(self, node):\n all_neighbours = [self.BOARD[node.y + y][node.x + x] for x in reversed(range(-1, 2)) for y in\n reversed(range(-1, 2))\n if 0 <= node.x + x < self.len_x and 0 <= node.y + y < self.len_y]\n non_traversable_neighbours = []\n for neighbour in all_neighbours:\n if not neighbour.traversable:\n non_traversable_neighbours.append(neighbour)\n elif neighbour.x != node.x and neighbour.y != node.y:\n x_diff = neighbour.x - node.x\n y_diff = neighbour.y - node.y\n if not self.BOARD[node.y + y_diff][node.x].traversable and \\\n not self.BOARD[node.y][node.x + x_diff].traversable:\n non_traversable_neighbours.append(neighbour)\n node.neighbours = [neighbour for neighbour in all_neighbours if neighbour not in non_traversable_neighbours]", "def get_neighbours(coords):\n\n dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n (-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n (-1,2),(0,2),(1,2),(0,0)]\n neighbours = []\n for dx, dy in dxdy:\n neighbour_coords = coords[0] + dx, coords[1] + dy\n if not (0 <= neighbour_coords[0] < nx and\n 0 <= neighbour_coords[1] < ny):\n # We're off the grid: no neighbours here.\n continue\n neighbour_cell = cells[neighbour_coords]\n if neighbour_cell is not None:\n # This cell is occupied: store this index of the contained point.\n neighbours.append(neighbour_cell)\n return neighbours", "def neighbours(number: int, number_sectors: int) -> [int, int, int, int]:\n col = number % number_sectors\n row = number // number_sectors\n\n nieg = [number - number_sectors, number + number_sectors, number - 1, number + 1]\n\n if row == 0:\n nieg[0] = -1\n if row == number_sectors - 1:\n nieg[1] = -1\n if col == 0:\n nieg[2] = -1\n if col == number_sectors - 1:\n nieg[3] = -1\n return nieg", "def get_neighbours(self, business, num=5, add_self=False):\n\n def radius_step(radius, num_longtidues, num_latitudes, time):\n \"\"\"expand the search-radius exponentially\"\"\"\n step = int(exp(time))\n radius['long_down'] = radius['long_down'] - step\n if radius['long_down'] <= 0:\n radius['long_down'] = 0\n radius['long_up'] = radius['long_up'] + step\n if radius['long_up'] >= num_longtidues - 1:\n 
radius['long_up'] = num_longtidues - 1\n radius['lat_down'] = radius['lat_down'] - step\n if radius['lat_down'] <= 0:\n radius['lat_down'] = 0\n radius['lat_up'] = radius['lat_up'] + step\n if radius['lat_up'] >= num_latitudes - 1:\n radius['lat_up'] = num_latitudes - 1\n\n cell = self.get_cell(business)\n b_long = business.longitude\n b_lat = business.latitude\n radius = {'long_down': cell[0], 'long_up': cell[0] + 1,\n 'lat_down': cell[1], 'lat_up': cell[1] + 1}\n ret = []\n time = 0\n inner_radius = 0\n while len(ret) < num and inner_radius < 100:\n found = []\n radius_step(radius, self.longitudes.size, self.latitudes.size,\n time)\n time = time + 1\n for row in range(radius['long_down'], radius['long_up']):\n for col in range(radius['lat_down'], radius['lat_up']):\n if row in self.cells and col in self.cells[row]:\n for item in self.cells[row][col]:\n if item not in ret:\n found.append(item)\n if (len(found) + len(ret)) < num:\n continue\n # We approximate the in-radius of the search-rectangle by half of\n # the distance between the centers of left and right border\n # (Not exactly the in-radius on the surface of a sphereoid, but\n # easier to calculate)\n inner_radius = haversine((self.longitudes[radius['long_down']],\n self.latitudes[cell[1]]),\n (self.longitudes[radius['long_up']],\n self.latitudes[cell[1]])) / 2\n for neighbour in found:\n n_long = neighbour['longitude']\n n_lat = neighbour['latitude']\n dist = haversine((b_long, b_lat), (n_long, n_lat))\n # make sure we only include businesses in the in-circle of the\n # search-rectangle\n if dist <= inner_radius and \\\n (add_self or neighbour['index'] != business.name):\n neighbour['distance'] = dist\n ret.append(neighbour)\n return sorted(ret, key=itemgetter('distance'))[:num]", "def init_cell(self, x, y):\n self.matrix_walls[y][x]['is_init'] = True\n\n # calculate max depth\n self.curr_depth += 1\n if self.curr_depth > self.max_depth:\n self.max_depth = self.curr_depth\n self.max_depth_x = x\n self.max_depth_y = y\n not_inited = self._get_not_inited_paths(x, y)\n\n # get all neighboring not inited cell except cell from where come\n while not_inited:\n path = choice(not_inited)\n not_inited.remove(path)\n if path == Maze.RIGHT:\n # crush wall by path and crush wall neighboring cell by inverse path\n self.matrix_walls[y][x][Maze.RIGHT] = False\n self.matrix_walls[y][x + 1][Maze.LEFT] = False\n self.init_cell(x + 1, y)\n elif path == Maze.LEFT:\n self.matrix_walls[y][x][Maze.LEFT] = False\n self.matrix_walls[y][x - 1][Maze.RIGHT] = False\n self.init_cell(x - 1, y)\n elif path == Maze.TOP:\n self.matrix_walls[y][x][Maze.TOP] = False\n self.matrix_walls[y - 1][x][Maze.BOTTOM] = False\n self.init_cell(x, y - 1)\n elif path == Maze.BOTTOM:\n self.matrix_walls[y][x][Maze.BOTTOM] = False\n self.matrix_walls[y + 1][x][Maze.TOP] = False\n self.init_cell(x, y + 1)\n self.curr_depth -= 1\n not_inited = self._get_not_inited_paths(x, y)", "def check_neighbours(coordinates):\n x_coord = coordinates[0]\n y_coord = coordinates[1]\n coordinates_value = 0\n for x_move in [-1, 0, 1]:\n x = x_coord + x_move\n for y_move in [-1, 0, 1]:\n y = y_coord + y_move\n try:\n value = grid[(x,y)]\n coordinates_value += value\n except KeyError:\n pass\n\n grid[coordinates] = coordinates_value\n # print(coordinates_value)\n return coordinates_value", "def open(self, xy):\n if xy in self.opened:\n return\n \n self.opened.add(xy)\n if xy in self._mines:\n self.mines_near[xy] = 'mine'\n self.flag(xy) # simplifies playing after death logic\n self.lose()\n else:\n 
self.mines_near[xy] = len(self.neighbours[xy] & self._mines)\n self.flagged.discard(xy)\n self.empty_remaining -= 1\n if self.empty_remaining <= 0:\n self.win()", "def generate_nearby_cells(self):\n for y in range(len(self.island_map)):\n for x in range(len(self.island_map[y])):\n list_of_nearby_cells = []\n\n if y != 0:\n self.generate_cell_above(x, y, list_of_nearby_cells)\n\n if x != 0:\n self.generate_cell_left(x, y, list_of_nearby_cells)\n\n if y != len(self.island_map)-1:\n self.generate_cell_below(x, y, list_of_nearby_cells)\n\n if x != len(self.island_map[y])-1:\n self.generate_cell_right(x, y, list_of_nearby_cells)\n\n self.island_map[y][x].nearby_cells = list_of_nearby_cells", "def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count", "def __find_neighbors(self, list_of_nodes):\n for node in list_of_nodes:\n x_pos = node.location[0]\n y_pos = node.location[1]\n if x_pos - 1 >= 0:\n # find the node in the list of nodes\n # add it as a neighbor of the current node\n neighbor = self.__find_neighbor_at(x_pos - 1, y_pos, list_of_nodes)\n node.add_neighbor(neighbor)\n if x_pos + 1 <= self.__n_rows - 1:\n neighbor = self.__find_neighbor_at(x_pos + 1, y_pos, list_of_nodes)\n node.add_neighbor(neighbor)\n if y_pos - 1 >= 0:\n neighbor = self.__find_neighbor_at(x_pos, y_pos - 1, list_of_nodes)\n node.add_neighbor(neighbor)\n if y_pos + 1 <= self.__n_columns - 1:\n neighbor = self.__find_neighbor_at(x_pos, y_pos + 1, list_of_nodes)\n node.add_neighbor(neighbor)", "def check_empty_neighbours(self, cell):\n\t\tneighbours = self.get_neighbours(cell)\n\t\tflag = True\n\t\tfor neighbour in neighbours:\n\t\t\tif neighbour.state != 0:\n\t\t\t\tflag = False\n\t\treturn flag", "def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #assign each of neighbours corrds\n #top left to top rigt\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # left to right (center)\n left = (row, col - 1)\n # the (row, col) cordinates passed into this function are situated here\n right = (row, col + 1)\n \n #bottom-left to bottom-right\n bottom_left = (row +1, col -1)\n bottom_center = (row +1, col)\n bottom_right = (row +1, col +1)\n \n return [top_left, top_center, top_right,\n left , right ,\n bottom_left, bottom_center, bottom_right]", "def get_neighbours(self):\n return []", "def get_further_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) > 1 or abs(x-i)+abs(y-j) == 0: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def neighbours((u,v)):\r\n return ((u,v+1), (u+1,v), (u,v-1), (u-1,v))", "def iter_node(self,i):\n nd = self.nodes[i]\n for kn in nd.get_close():\n # for kn in nd.get_known():\n # for kn in nd.neighbours:\n kn_node = self.nodes[kn.lindex]\n nd.add_known_nodes(kn.path_len,kn_node.get_close())", "def find_neighbors(self):\n x, y = self.position\n\n for i in range(3):\n for j in range(3):\n try:\n self.neighbors.append(self.stitches[(x - 1 + i, y - 1 + j)].position)\n except:\n pass\n\n # this cell will be added by default so we must delete at the end\n self.neighbors.remove(self.position)", "def 
list_neighbors(current_row, current_col, grid_size):\n neighbors = []\n for row_offset, col_offset in [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1)]:\n new_row = current_row + row_offset\n new_col = current_col + col_offset\n if (new_row >= 0 and new_row < grid_size and new_col >= 0\n and new_col < grid_size):\n neighbors.append((new_row, new_col))\n return neighbors", "def solvable(grid):\n y = x = 1\n stack = deque([(0, y, x,)])\n goal = len(grid) - 2\n found = np.ones_like(grid, dtype=bool)\n \n while stack:\n i, y, x = stack.popleft()\n i += 1\n for y2, x2 in solve_perfect.neighbors(y, x, grid):\n if found[y2, x2]:\n if y2 == goal and x2 == goal:\n return i\n else:\n found[y2, x2] = False\n stack.append((i, y2, x2,))\n \n return 0", "def checkNumNeighbors():", "def trace_neighbours(self, x, y):\r\n return list(filter(lambda n: n != None, (self.see_neighbour(x, y, i, j) for i in [-1, 0, 1] for j in [-1, 0, 1])))", "def add_neighbors(self, visited, parent):\n \n x = parent.x\n y = parent.y\n cost = parent.cost\n neighbors = []\n neighbor_grid = [(-1,1), (0,1), (1,1), (-1,0), (1,0), (-1,-1), (0,-1), (1,-1)]\n\n for idx in neighbor_grid:\n new_x = x + idx[0]\n new_y = y + idx[1]\n if self.valid_pos(new_x, new_y, visited):\n visited[new_y, new_x] = 1\n if self.valid_cost(x,y):\n new_cost = cost + np.linalg.norm(idx)*self.costmap[new_y, new_x]\n neighbors.append(self.new_node(new_x, new_y, new_cost, parent))\n\n return neighbors", "def _find_connected_tiles(self, row, col, non_empty_tiles_not_visited: set) -> None:\n\n non_empty_tiles_not_visited.remove((row, col))\n\n if (row > 0) and (self.board[row - 1][col] is not None) and ((row - 1, col) in non_empty_tiles_not_visited):\n self._find_connected_tiles(row - 1, col, non_empty_tiles_not_visited)\n if (\n (row < self.board_size - 1)\n and (self.board[row + 1][col] is not None)\n and ((row + 1, col) in non_empty_tiles_not_visited)\n ):\n self._find_connected_tiles(row + 1, col, non_empty_tiles_not_visited)\n if (col > 0) and (self.board[row][col - 1] is not None) and ((row, col - 1) in non_empty_tiles_not_visited):\n self._find_connected_tiles(row, col - 1, non_empty_tiles_not_visited)\n if (\n (col < self.board_size - 1)\n and (self.board[row][col + 1] is not None)\n and ((row, col + 1) in non_empty_tiles_not_visited)\n ):\n self._find_connected_tiles(row, col + 1, non_empty_tiles_not_visited)", "def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]", "def recursive_backtrack(width=16, height=16) -> Maze:\n maze = Maze(width=width, height=height, algorithm=None)\n visited = [[False for _ in range(maze.width)] for _ in range(maze.height)]\n \n # ensure only one entrance to the center squares\n centerx = maze.width // 2 - 1\n centery = maze.height // 2 - 1\n \n visited[centery][centerx] = True\n visited[centery][centerx+1] = True\n visited[centery+1][centerx+1] = False\n visited[centery+1][centerx] = True\n\n def visit(x, y):\n visited[y][x] = True\n neighbors = maze.neighbors(x, y)\n random.shuffle(neighbors)\n\n for direction in neighbors:\n nx, ny = maze.neighbor(x, y, direction)\n if not visited[ny][nx]:\n maze.break_wall(x, y, direction)\n nx, ny = maze.neighbor(x, y, direction)\n visit(nx, ny)\n\n visit(0, 0)\n return maze", "def BFS(maze: list, start: tuple, goal: tuple):\n n = len(maze) # Get the dimension of the maze\n\n 
#========================================#\n # Some data checking statements\n\n if (not is_valid(start, n)):\n print(\"BFS: Start indices outside maze dimensions\")\n return False\n elif (not is_valid(goal, n)):\n print(\"BFS: Goal indices outside maze dimensions\")\n return False\n\n # End data checking statements\n #========================================#\n\n number_of_nodes_visited = 0\n visited = copy.deepcopy(maze) # We can use a copy of the maze to keep track of visited squares (Considered using a set here, thought that time efficiency was important)\n # visited = list(map(list, maze)) # Alternative to using copy.deepcopy\n\n # Initialize a matrix of the same size as maze where each value is None.\n previous = [[None for i in range(n)] for j in range(n)]\n\n queue = deque() # Define our queue of \"fringe\" squares\n queue.append(start) # Push the start square into our queue\n visited[start[0]][start[1]] = 1 # Set our start to visited\n\n while (len(queue)): # While there exists items in the queue\n current = queue.popleft() # Pop the square at index 0\n number_of_nodes_visited += 1 # Increase number of nodes visited\n\n if (current == goal): # If current is the goal, we found it!\n # We now want to traverse back to make a path using our 'previous' matrix\n path = []\n while (current != None):\n path.append(current)\n current = previous[current[0]][current[1]]\n path.reverse()\n return (True, path, number_of_nodes_visited)\n\n current_i, current_j = current # Unpack the current pair\n \n # Now we want to add all unvisited squares that are possible to get to from the current square\n for i in range(len(nearby_offsets)):\n offset_i, offset_j = nearby_offsets[i]\n possible = (current_i + offset_i, current_j + offset_j)\n # print(f\"Current possible: {possible_i} {possible_j}\") # DEBUG\n if (is_valid(possible, n)): # If the calculated square is within the maze matrix\n # If possible has not been visited yet\n if (not visited[possible[0]][possible[1]]):\n queue.append(possible) # Add possible to our queue\n # Set possible to visited\n visited[possible[0]][possible[1]] = 1\n # Set the previous square for possible to the current square\n previous[possible[0]][possible[1]] = current\n # If the while loop goes out, and the queue is empty, then there is no possible path\n return (False, [], number_of_nodes_visited)", "def make_inf_closed(self):\n X = self\n T = X.tree()\n for vertex in T.vertices():\n X.add_component(vertex)", "def wallsAndGates(self, rooms: List[List[int]]) -> None:\r\n if not rooms:\r\n return rooms\r\n \r\n ind_0s = []\r\n len_row = len(rooms)\r\n len_col = len(rooms[0])\r\n for i in range(len_row):\r\n for j in range(len_col):\r\n if rooms[i][j] == 0:\r\n ind_0s.append((i,j))\r\n que = ind_0s\r\n while que:\r\n i,j = que.pop()\r\n neighbors = [(i,j-1), (i-1,j), (i,j+1), (i+1,j)]\r\n if rooms[i][j] == 0:\r\n parent_distance = 0\r\n else:\r\n parent_distance = rooms[i][j]\r\n for x, y in neighbors:\r\n if x>=0 and x<len_row and y>=0 and y<len_col and rooms[x][y]!=-1 and rooms[x][y] and \\\r\n parent_distance+1 < rooms[x][y]:\r\n que.append((x,y))", "def cell_neighbours(self, x, y):\n if self.maze_map[y][x]:\n return set()\n neighbours = set()\n for (direction, ((i, j), dummy)) in MazeGraph.DIRECTIONS.items():\n xi, yj = (x + i) % self.width, (y + j) % self.height\n if not self.maze_map[yj][xi]:\n neighbours.add((direction, (xi, yj)))\n return neighbours", "def __getNeighbours(self, x: int, y: int) -> List:\n\t\tneighbours = []\n\t\tneighbours.append((x, y + 
1))\n\t\tneighbours.append((x, y - 1))\n\t\tneighbours.append((x + 1, y))\n\t\tneighbours.append((x - 1, y))\n\t\tneighbours.append((x + 1, y + 1))\n\t\tneighbours.append((x - 1, y + 1))\n\t\tneighbours.append((x - 1, y - 1))\n\t\tneighbours.append((x + 1, y - 1))\n\n\t\tvalid_neighbours = [x for x in neighbours if x[0] > 0 and x[0] <= 5 and x[1] > 0 and x[1] <= 5]\n\n\t\treturn valid_neighbours", "def wallsAndGates(self, rooms: List[List[int]]) -> None:\r\n if not rooms or not rooms[0]:\r\n return\r\n \r\n \r\n m, n = len(rooms), len(rooms[0])\r\n q = deque([])\r\n for i in range(m):\r\n for j in range(n):\r\n if rooms[i][j] == 0:\r\n # 把所有为0坐标一次加入队列\r\n q.append((i,j))\r\n \r\n self.BFS(rooms, q)\r\n return", "def neighbours(pos):\r\n\t\tnbs = []\r\n\t\tfor direction in directions:\r\n\t\t\tnb = add(pos, direction)\r\n\t\t\tif is_inside(nb):\r\n\t\t\t\tnbs.append(nb)\r\n\t\treturn nbs", "def check_lost (grid):\r\n adjacent = False\r\n zero_value = False\r\n for i in range(4): \r\n for j in range(4):\r\n if grid[i][j] == 0:\r\n zero_value = True\r\n break\r\n for i in range(3):\r\n for j in range(3):\r\n if grid[i][j] == grid[i][j+1]:\r\n adjacent = True\r\n break\r\n if grid[i][j] == grid[i+1][j]:\r\n adjacent = True\r\n break\r\n if not adjacent and not zero_value:\r\n return True\r\n return False", "def get_neighbour(self, loc):\n y_lim, x_lim = np.shape(self.map)\n y, x = loc\n neighbour_cords = [(y - 1, x), (y + 1, x), (y, x - 1), (y, x + 1)]\n neighbour_cells = []\n for cords in neighbour_cords:\n curr_y, curr_x = cords\n if curr_y < 0 or curr_y >= y_lim:\n pass\n elif curr_x < 0 or curr_x >= x_lim:\n pass\n else:\n neighbour_cells.append(self.map[cords])\n\n return neighbour_cells" ]
[ "0.6919871", "0.6919871", "0.68459386", "0.6506167", "0.64691454", "0.615098", "0.61087185", "0.6104565", "0.60708123", "0.60645074", "0.60544246", "0.5988625", "0.592371", "0.5901404", "0.5862205", "0.5847411", "0.5810795", "0.5798435", "0.5754326", "0.57496643", "0.5704948", "0.5691", "0.5636125", "0.56204915", "0.56139606", "0.5602924", "0.5596358", "0.5587375", "0.55641055", "0.5562893", "0.55428034", "0.5542533", "0.5535823", "0.5532301", "0.5529254", "0.5524861", "0.5507511", "0.5495481", "0.5488497", "0.5481718", "0.5480417", "0.5473014", "0.5472992", "0.5467284", "0.546181", "0.5456719", "0.54511523", "0.5438695", "0.5437428", "0.5435252", "0.54154783", "0.5415418", "0.5392973", "0.5392054", "0.53919196", "0.5379508", "0.53602386", "0.5358696", "0.5357682", "0.5354626", "0.5354046", "0.5350558", "0.5346175", "0.53400797", "0.53394824", "0.53384906", "0.5329971", "0.5328179", "0.53206754", "0.531801", "0.53117394", "0.530985", "0.5306169", "0.5296687", "0.5295811", "0.5278684", "0.5278408", "0.527837", "0.5277796", "0.52729315", "0.527255", "0.52664864", "0.5261553", "0.52469575", "0.5245277", "0.5244017", "0.5233182", "0.52294886", "0.5228784", "0.5223859", "0.5222901", "0.52202535", "0.5206687", "0.52008694", "0.51966333", "0.5192003", "0.5189144", "0.51867175", "0.5183828", "0.5182531" ]
0.75434107
0
set a flag to the desired coordinates.
def flag(self, y, x):
    if self.table_state[y][x] == '-':
        self.table_state[y][x] = Minesweeper.FLAG
        Minesweeper.print_table(self.table_state)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setFlag(self, flag, value) -> None:\n ...", "def set_at(self,x,y,set=True):\n\t\tif ( not self._validate(x,y )):\n\t\t\treturn\n\n\t\t# set the bit in the grid\n\t\tif set:\n\t\t\tself.Grid[y] = self.Grid[y] | (1 << x)\n\t\telse:\n\t\t\tself.Grid[y] = self.Grid[y] & ~(1 << x)", "def set_flag(self, new):\n self.flag = new", "def set(self, y, x):\n\tif x<0 or self.X<=x or y<0 or self.Y<=y:\n\t raise ValueError, \"Coordinates out of range %i,%i\"% (y,x)\n\tself.state[x,y] = 1", "def setFlag(flagbyte, pos, status):\n if status:\n return flagbyte | 2**pos\n else:\n return flagbyte & ~2**pos", "def set_flag(self, flag_name, value):\n flags = {'C':0, # Carry\n 'Z':1, # Zero\n 'I':2, # Interrupt mask\n 'D':3, # Decimal\n 'B':4, # Break\n 'V':6, # Overflow\n 'N':7} # Negative\n\n flag_reg = self.get_register('P')\n if value == 1:\n new_flag = flag_reg | 1 << flags[flag_name]\n else:\n new_flag = flag_reg & ~(1 << flags[flag_name])\n\n self.set_register('P', new_flag)", "def set_flag(self, set_flag):\n\n self._set_flag = set_flag", "def setFlags(self, fixLonAcrossDateline, averageLonAtPole):\n LIB.mnt_grid_setFlags.argtypes = [POINTER(c_void_p), c_int, c_int]\n ier = LIB.mnt_grid_setFlags(self.obj,\n fixLonAcrossDateline,\n averageLonAtPole)\n if ier:\n error_handler(FILE, 'setFlags', ier)", "def flagCell(self, row, col):\n self.flagged[row, col] = 1", "def flag(self, flag: int, pixel=(slice(None), slice(None))):\n if self.mode != 'write':\n raise Exception(\"Must open file in write mode to do this!\")\n\n self.flags.valid(flag, error=True)\n if not np.isscalar(flag) and self._flagArray[pixel].shape != flag.shape:\n raise ValueError('flag must be scalar or match the desired region selected by x & y coordinates')\n self._flagArray[pixel] |= flag\n self._flagArray.flush()", "def flag(self, i, j):\n # Does not allow starting a game with a flag\n if not self.is_game_over and self.is_initialized:\n if not self.revealed[i, j]:\n self.flags[i, j] = not self.flags[i, j]\n self.flags_pts.set_data(*np.where(self.flags)[::-1])\n self.title_txt.set_text('{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))\n self.refresh_canvas()", "def __setitem__(self, pos, is_on):\n row, column = pos\n self.bits[row][column] = is_on", "def set_xy(self, x, y, val):\r\n\t\tself.grid[y, x] = val", "def setFlags(self, label, flags):\n\n label = checkLabel(label)\n try:\n ndim, dtype = flags.ndim, flags.dtype\n except AttributeError:\n flags = np.array(flags)\n ndim, dtype = flags.ndim, flags.dtype\n if ndim != 1:\n raise ValueError('flags.ndim must be 1')\n if dtype != bool:\n raise ValueError('flags.dtype must be bool')\n if len(flags) != self._n_atoms:\n raise ValueError('len(flags) must be equal to number of atoms')\n self._setFlags(label, flags)", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setDegreesFlag(newValue):\n global DegreesFlag\n DegreesFlag = newValue", "def mark_pos(self, position, marker):\n i, j = self.board[position]\n self.grid[i][j] = marker", "def flag_set(self, flag):\n if self.flags & flag != 0:\n return True\n else:\n return False", "def m_location_set(self, x: int, y: int):\n pass", "def toggle_xy(self, x, y):\r\n\t\tself.grid[y, x] = False if self.grid[y,x] else True", "def set_new_location(self, xPos, yPos):", "def 
set_flag(name,light0,light1,light2,light3): #TODO UID convert to int\n correct_range = range(4)\n assert light1 in correct_range, \"Error: input for light0 must be an integer between 0 and 3 inclusive\"\n assert light2 in correct_range, \"Error: input for light1 must be an integer between 0 and 3 inclusive\"\n assert light3 in correct_range, \"Error: input for light2 must be an integer between 0 and 3 inclusive\"\n assert light4 in correct_range, \"Error: input for light3 must be an integer between 0 and 3 inclusive\"\n name = _lookup(name)\n flag_data = list(name) + list(light1) + list(light2) + list(light3) + list(light4)\n mc.set('flag_values',flag_data)", "def setFlag(self, whichFlag, whichValue):\n \n try:\n if self.__debugOn == True:\n print(\"Flags in: %x\" %self.__flags)\n \n # Get temproary flag value that blanks out the flag.\n tFlag = (~whichFlag) & self.__flags\n \n # Set our flag to the given value.\n self.__flags = tFlag | whichValue\n \n if self.__debugOn == True:\n print(\"Flags out: %x\" %self.__flags)\n \n except:\n raise\n \n return", "def set(self,argument):\n if argument == \"X\" or \"O\":\n self.tile=argument", "def _add_flag(self, mbox, msgset, flag):\n self.select_mailbox(mbox, False)\n self._cmd(\"STORE\", msgset, \"+FLAGS\", flag)", "def set_coordinates(self, coordinates):\n self.coordinates = coordinates", "def setDesiredPosition(self, x, y):\n (self.setX, self.setY) = (x , y)", "def setCoords(self, p_float, p_float_1, p_float_2, p_float_3): # real signature unknown; restored from __doc__\r\n pass", "def eflags_set(self, bit: int, value: bool) -> None:\n if self.eflags_get(bit):\n if not value:\n self.eflags &= ~(1 << bit)\n else:\n if value:\n self.eflags |= 1 << bit", "def toggleFlag(self, event): \n clicked = event.widget\n if clicked.isInPlay(): self.changeSmile(1)\n value = clicked.setFlag()\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n adjTile.numFlags += value\n self.numFlags += value\n self.flagLabel.configure(text=\"Flags: \"+str(self.numFlags))", "def set_coordinates(self, x, y):\n self.x = x\n self.y = y", "def setLightPosition(lightID, x,y,z, coordmode='absolute'):\n vdict = {'absolute':'ABS','user':'USER','angle':'ANGLE'}\n dislin.litpos(lightID, x,y,z, vdict[coordmode])", "def set_point(self, lon=None, lat=None):\n changed = False\n if not self.point and lon and lat:\n self.point = Point(lon, lat)\n changed = True\n if changed:\n self.save()\n return changed", "def set_geofence(self, latitude, longitude):\n\t\tself.center = (latitude, longitude)\n\t\tself.center_coords = ogr.Geometry(ogr.wkbPoint)\n\t\tself.center_coords.AddPoint(*self.center)\n\t\tself.init_geofence()", "def setBit(self,i,boolval):\n self.boolVals[i]=boolval", "def set_square(self, x, y, mark):\n if self.board[x][y] == 0:\n self.board[x][y] = mark\n return True\n else:\n return False", "def set(self, coord, value):\n layer, row, column = tuple(coord)\n self.validPosition(layer, row, column)\n self._state['visible']['board'][layer][row][column] = value", "def set_coord(self, l, sign, b):\n if l is not None:\n self.l = float(l)\n if b is not None:\n self.b = float(b)\n if sign == '-':\n self.b *= -1.", "def SetToggle(self, flag):\n\n self.up = not flag\n self.Refresh()", "def setLayoutFlag(self, flag, on=True):\r\n if on:\r\n self.__data.layoutFlags |= flag\r\n else:\r\n self.__data.layoutFlags &= ~flag", "def set_mark( self, mark, index ):\n\n try:\n int(self.__grid[index-1])\n\n if mark.lower() == 'x' or mark.lower() == 'o': \n self.__grid[index-1] = mark\n\n 
return 1\n\n except ValueError:\n return 0", "def set_coordinates(self, coordinates, default=True):\n from sage.functions.trig import sin, cos\n from sage.symbolic.constants import pi\n\n states_without_coordinates = []\n for state in self.iter_states():\n try:\n state.coordinates = coordinates[state.label()]\n continue\n except (KeyError, TypeError):\n pass\n\n try:\n state.coordinates = coordinates(state.label())\n continue\n except TypeError:\n pass\n\n states_without_coordinates.append(state)\n\n if default:\n n = len(states_without_coordinates)\n for j, state in enumerate(states_without_coordinates):\n state.coordinates = (3*cos(2*pi*j/n),\n 3*sin(2*pi*j/n))", "def setflag(self, flag):\n\t\treturn pservlet.pipe_set_flag(self._pipe_desc, flag)", "def set_adressing(self, addr, set=True):\n assert addr in [self.ADDRESSING_HORIZ, self.ADDRESSING_VERT], \"Addressing must be ADDRESSING_HORIZ or ADDRESSING_VERT.\"\n self.addressing = addr\n if set:\n self._set_function()", "def mine_set(x, y, ty):\n click.echo('Set %s mine at %s,%s' % (ty, x, y))", "def test_setFlags(self):\n self._flagsTest('setFlags', b'FLAGS')", "def set(self, x, y):\n self.x = x\n self.y = y", "def setMask(self, mask):\n try:\n self.mask = mask\n self.inds = na.nonzero(self.mask.flat)[0]\n #print \"length of self.inds\",len(self.inds)\n #print self.inds\n self.dim = self.mask.shape[::-1]\n #print self.mask.shape\n return True\n except Exception as error:\n print(\"failed in setMask\", error)", "def set_flag_cells(self, cells):\n self.cells_flagged.add(cells)", "def set_location(self, location_set):", "def move(self, coord, mark):\n self.arr[coord] = mark", "def setPosition(position):", "def set_mask(self, mask):\n self.mask = mask", "def mark(self, x, y, autoreveal=True):\n\t\tif self.marks[x][y] == CLOSED:\n\t\t\tself.marks[x][y] = FLAG\n\t\t\tif autoreveal:\n\t\t\t\tfor (x2, y2) in self.get_valid_neighbors(x, y):\n\t\t\t\t\tself.auto_reveal(x2, y2)\n\t\telif self.marks[x][y] == FLAG:\n\t\t\tself.marks[x][y] = CLOSED", "def set_move(self, position: Point, mark: Mark) -> None:\n\t\tif mark == Mark.X:\n\t\t\tself.tiles[position.x][position.y] = 1\n\t\telse:\n\t\t\tself.tiles[position.x][position.y] = -1", "def setLocked( self, state = True ):\n self._xLocked = state\n self._yLocked = state", "def _setindicator(self, index: int, value: bool) -> None:\n bitmask = 1 << (index + 1)\n current = self._get_buffer(0x04)\n if value:\n self._set_buffer(0x04, current | bitmask)\n else:\n self._set_buffer(0x04, current & ~bitmask)\n if self._auto_write:\n self.show()", "def location(self, value: 'Point'):\n self.geometry.location = value", "def setPositionalMask(self, value):\n return self._set(positionalMask=value)", "def set_geo(self, lon=None, lat=None):\n changed = False\n if not lon:\n lon = self.lon\n if not lat:\n lat = self.lat\n\n if not self.point and lon and lat:\n self.point = Point(lon, lat)\n changed = True\n\n if not self.mpoly and lon and lat:\n self.mpoly = MultiPolygon(Polygon(((lon, lat),\n (lon, lat),\n (lon, lat),\n (lon, lat))))\n changed = True\n\n if changed:\n self.save()", "def setFlag(self):\n if self.inPlay and not self.shown:\n self.flag = not(self.flag)\n image_index = 11 if self.flag else 10\n self.configure(image = Tile.images[image_index])\n return 1 if self.flag else -1\n return 0", "def setMask(self, mask):\n self.mask = mask", "def setOccupation(point, occupation=False):\n grid[point[0]][point[1]][point[2]] = occupation", "def on_toggle_click(self, change):\n change = change[\"new\"]\n if change == 
\"Good\":\n self.set_mask_good()\n elif change == \"Bad\":\n self.set_mask_bad()\n elif change == \"Continuum\":\n self.set_mask_continuum()\n elif change == \"Line\":\n self.set_mask_line()", "def set_flags(self, flags):\n\n if isinstance(flags, int):\n if flags not in (0, 1, 2, 3):\n raise ValueError(\"Invalid flags: {}\".format(flags))\n\n self.raw.flags = flags\n\n elif isinstance(flags, Iterable):\n valid_flags = {\"DF\", \"MF\"}\n flags = set(flags)\n invalid_flags = flags.difference(valid_flags)\n\n if len(invalid_flags) > 0:\n raise ValueError(\"Invalid flags: {}\".format(invalid_flags))\n\n raw_flags = 0\n\n if \"DF\" in flags:\n raw_flags += 0b010\n\n if \"MF\" in flags:\n raw_flags += 0b001\n\n self.raw.flags = raw_flags\n\n else:\n msg = \"Expected flags to be int or iterable, got: {}\"\n raise TypeError(msg.format(type(flags).__name__))", "def setUp(self):\n premask = np.array([[0.0, 3.0, 2.0], [0.5, 0.0, 1.5], [0.2, 0.0, 0]])\n self.mask = np.ma.masked_where(premask > 1.0, premask)\n\n self.x_coord = DimCoord([1, 2, 3], long_name=\"longitude\")\n self.y_coord = DimCoord([1, 2, 3], long_name=\"latitude\")\n self.coords = [self.x_coord, self.y_coord]\n self.upper = 100.0\n self.lower = 0.0\n self.units = \"m\"", "def set_pin(self, xpos1, ypos1, xpos2, ypos2):\n\n distx = xpos1-xpos2\n disty = ypos1-ypos2\n\n if distx < 0 and disty == 0:\n self.def_field['direction'] = 'R'\n if distx > 0 and disty == 0:\n self.def_field['direction'] = 'L'\n\n if distx == 0 and disty > 0:\n self.def_field['direction'] = 'D'\n if distx == 0 and disty < 0:\n self.def_field['direction'] = 'U'\n\n self.def_field['length'] = int(math.sqrt(distx * distx\n + disty * disty))\n self.def_field['x'] = self.offset[0] + xpos1\n self.def_field['y'] = self.offset[1] + ypos1", "def setLane(self, offset):\n command = struct.pack(\"<Bf\", 0x2c, offset)\n self.sendCommand(command)", "def set(self, *options: str) -> int:\n self.flags |= self.mask(*options)\n return self.flags", "def on_VI_XY_set_clicked(self):\n # TODO: not implemented yet\n disp_coord()\n if qmdz_const.Auto_Range == 0:\n xmin = int(read_config(qmdz_const.SYS_CONF_PATH, 'COORD', 'x_min'))\n xmax = int(read_config(qmdz_const.SYS_CONF_PATH, 'COORD', 'x_max'))\n ymin = int(read_config(qmdz_const.SYS_CONF_PATH, 'COORD', 'y_min'))\n ymax = int(read_config(qmdz_const.SYS_CONF_PATH, 'COORD', 'y_max'))\n self.VI_MPL.change_xy(xmin, xmax, ymin, ymax)", "def set_coords(self,coords):\n [self.x,self.y,self.w,self.h] = coords", "def antenna_set(self):", "def set_pos(self, p, a, **kwargs):\n\t\treturn self.send(\"set_pos\", p[0], p[1], a, **kwargs)", "def setCell(self, (xIndex, yIndex)):\n changed = self.grid[xIndex][yIndex] == False\n self.grid[xIndex][yIndex] = True\n if changed:\n self.drawSquare((xIndex, yIndex))", "def set_2d_location(self, x, y):\r\n self.unif[42:44] = [x, y]", "def set_setpoint(self, value):\n act = SetpointAction(self, value)\n return act.invoke()", "def toggle_at(self,x,y):\n\t\tself.set_at(x,y,self.get_at(x,y) != 1)", "def setCoordinateResolution(*args):", "def setCoordinateResolution(*args):", "def setCoordinateResolution(*args):", "def setCoordinateResolution(*args):", "def set(self, flag: int, value: int):\n if flag == cv2.cv2.CAP_PROP_POS_FRAMES:\n self.buff_idx = value", "def set_pixel(self, x, y, value):\r\n \r\n # Rotation and mirroring\r\n a = x\r\n x = y\r\n y = 7-a\r\n \r\n # From the baseclass\r\n if x < 0 or x > 7 or y < 0 or y > 7:\r\n # Ignore out of bounds pixels.\r\n return\r\n # Set green LED based on 1st bit in 
value.\r\n self.set_led(y * 16 + x, 1 if value & Display.COLOR_GREEN > 0 else 0)\r\n # Set red LED based on 2nd bit in value.\r\n self.set_led(y * 16 + x + 8, 1 if value & Display.COLOR_RED > 0 else 0)", "def toggle_flag(self, loc: tuple[int, int]) -> None:\n if self.game_over or self.field[loc].is_naked:\n return\n\n if self.field[loc].is_flagged:\n self.field[loc].un_flag()\n self.mines_left += 1\n else:\n self.field[loc].flag()\n self.mines_left -= 1\n\n if self.auto_solving.get():\n block = Block(self.field, loc)\n useful_neighbors = {neighbor for neighbor in block.naked_neighbors\n if Block(self.field, neighbor).unknown_neighbors}\n [self.hyper_queue.remove(cell) for cell in useful_neighbors]\n self.auto_queue.add_batch(useful_neighbors,\n emphasis=self.emphasis[\"add_batch\"],\n color=\"new_auto\")\n self._auto_spark()", "def set_pixel(self, x, y, v):\n self.buf[y][x] = v & 0x07", "def doOptPoint(opnt):\n s.setScriptBool(odi.INDX_BOOL_DO_OPT_POINT, opnt)\n s.setScriptBool(odi.INDX_BOOL_DO_OPT_POINT_SET, True)", "def set_points(self, val=None):\r\n self._points = self.nx*self.ny*self.nz", "def setCoords(self, coords):\n\n self.coords = coords", "def add_point(self, x: int, y: int):\n self.state[x, y] = 1", "def add_point(self, x: int, y: int):\n self.state[x, y] = 1" ]
[ "0.68833566", "0.688153", "0.68004435", "0.65121055", "0.64788616", "0.6427263", "0.6359339", "0.6310714", "0.6294104", "0.62276906", "0.6186711", "0.618563", "0.61684364", "0.6153594", "0.61314297", "0.61314297", "0.61314297", "0.61314297", "0.61314297", "0.61314297", "0.61314297", "0.61314297", "0.61314297", "0.61314297", "0.61314297", "0.61313134", "0.609246", "0.60850143", "0.60794115", "0.6075836", "0.60729593", "0.6038429", "0.5957808", "0.59467846", "0.59414387", "0.59260476", "0.5919075", "0.59089994", "0.5893423", "0.5888983", "0.5819662", "0.5819302", "0.5814947", "0.5795887", "0.57779557", "0.57772523", "0.5774692", "0.5773961", "0.5768644", "0.57655156", "0.5720743", "0.57040346", "0.5700258", "0.5697043", "0.5695451", "0.56908256", "0.5688482", "0.56809664", "0.56667733", "0.5661731", "0.5650115", "0.5648398", "0.5647565", "0.5637016", "0.56354654", "0.56183666", "0.5604211", "0.55824095", "0.55777395", "0.5567137", "0.55648595", "0.5554415", "0.55479133", "0.55477464", "0.55376214", "0.55374193", "0.5532043", "0.55293345", "0.5526624", "0.550945", "0.5506981", "0.5506024", "0.55054593", "0.5502884", "0.5501935", "0.5501463", "0.5499954", "0.5490445", "0.5490445", "0.5490445", "0.5490445", "0.54831326", "0.54736865", "0.5465322", "0.5459973", "0.54453176", "0.54345536", "0.54093343", "0.54082197", "0.54082197" ]
0.5479736
92
come here when the coordinates do not have a bomb. update the table_state with the selected coordinate.
def tease_user(self, y, x):
    self.table_state[y][x] = self.final_table[y][x]
    # if there are no neighbouring 0s, open neighbours
    if self.table_state[y][x] == '0':
        self.open_neighbours(y, x)
    self.print_table(self.table_state)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def click_cell(self, event):\n if (self.world_setable):\n x, y = event.x, event.y\n row = y / self.cell_size\n col = x / self.cell_size\n if ((row in range(self.cell_row)) and\n (col in range(self.cell_col))):\n status_now = not self.world_status.now[row, col]\n if (status_now):\n color = self.color_alive\n else:\n color = self.color_dead\n item_id = self.world[row, col]\n self.canvas.itemconfig(item_id, fill=color)\n self.world_status.now[row, col] = status_now\n self.world_status.next = self.world_status.now.copy()\n self.init_world = self.world_status.now.copy()", "def create_foothold(self):\n sel = self.selected()\n cell = sel[0]\n if cell.contents == Contents.bomb:\n cell.contents = Contents.empty\n for adj in cell.get_adjacent():\n if adj.contents == Contents.bomb:\n adj.contents = Contents.empty\n self.set_bomb_contacts()", "def mouseClick(self, event):\n if self.editMode:\n self.applyEditing(event)\n self.clearEditCursor(event)\n return\n x = (event.y - self.margin) // self.cellSize\n y = (event.x - self.margin) // self.cellSize\n if self.checkFree(x, y) == self.colors['busy']:\n return # clicked busy position\n self.onBoard += 1\n self.refreshScore()\n self.history.append((\n self.setBusy(x, y),\n self.addPentomino(x, y)\n ))\n if self.onBoard == self.expectedBest:\n self.gameOver()", "def update_state(self):\n self.reset_state()\n for piece in self.pieces:\n coordinates = piece.get_block_positions()\n for coor in coordinates:\n x, y = coor\n self.state[y][x] = piece", "def table_move_update():\n pos = self.variables.table.get_current_position()\n self.table_move_ui.x_move.setProperty(\"value\", int(pos[0]))\n self.table_move_ui.y_move.setProperty(\"value\", int(pos[1]))\n self.table_move_ui.z_move.setProperty(\"value\", int(pos[2]))", "def change_cell(self, event):\n try:\n (x, y) = self.get_id_from_coor(event.x, event.y)\n if self._board[x][y]:\n self._board[x][y] = False\n else:\n self._board[x][y] = True\n if self._board[x][y]:\n self.canvas.itemconfig(self.rect[y,x], fill=self._secondary_color)\n else:\n self.canvas.itemconfig(self.rect[y,x], fill=self._primary_color)\n except KeyError:\n pass # tkinter bug", "def pick(self, obj_height_from_table):\n init_x = self.x\n init_y = self.y\n init_z = self.z\n obj_z = self.table_z + obj_height_from_table*self.disk_height\n \n #open gripper\n self.gripper.command_position(100)\n \n #drop to given height\n self.move_to(init_x, init_y, obj_z)\n \n #close gripper\n self.gripper.command_position(0)\n \n #return to initial position\n self.move_to(init_x, init_y, init_z)", "def setCell(self, (xIndex, yIndex)):\n changed = self.grid[xIndex][yIndex] == False\n self.grid[xIndex][yIndex] = True\n if changed:\n self.drawSquare((xIndex, yIndex))", "def selection_board_maintenance(self,x_cor,y_cor):\t\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit() \r\n\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\t\t#print(\"mouse is pressed\")\r\n\t\t\t\t#everything begins here\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\t\t\t\t#print(who_is_clicked)\r\n\t\t\t\tif (self.selected_from_selection_bar + self.selected_from_board):\r\n\t\t\t\t\t#print(\"inside selected item one\")\r\n\t\t\t\t\tif 
Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_board = False\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =Helping_Class.selection_bar_reverse_mapping[piece]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(\"nothing is selected\")\r\n\t\t\t\t\t#check if clicked on his piece change then select it\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t#print(self.selected_piece,self.selected_position,self.selected_from_selection_bar)\r\n\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t#color change\r\n\t\t\t\t#who_is_clicked is dummy variable as no click has occurred\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\r\n\t\t\t\tself.blit_piece = [(x_adjusted,y_adjusted),piece]", "def flag(self, y, x):\n if self.table_state[y][x] == '-':\n self.table_state[y][x] = Minesweeper.FLAG\n Minesweeper.print_table(self.table_state)", "def make_cell_change(self, x, y):\n self.cells[x][y] = 1 if not self.cells[x][y] else 0", "def update_board(self, coordinate, hit):\n \n if hit:\n self.board_state[coordinate.row_idx][coordinate.col_idx] = \"H\"\n else:\n self.board_state[coordinate.row_idx][coordinate.col_idx] = \"M\"", "def dirty_squares(self) -> None:\n row = ran(0, self.__squares.__len__() - 1)\n column = ran(0, self.__squares[0].__len__() - 1)\n self.__squares[row][column] = Floor._dirty\n print(\"Ensuciamos el piso y quedo así: \", self.__str__())", "def _set_state_coordinates(atomic_entity, width, height):\n state_entity = atomic_entity.get(\"children\")[0]\n parent_coor = atomic_entity[\"coordinates\"]\n state_entity[\"coordinates\"] = {\n \"x\": parent_coor[\"x\"] + (parent_coor[\"width\"] - width) / 2,\n \"y\": parent_coor[\"y\"] - (height / 2),\n \"width\": width,\n \"height\": height,\n }", "def main_board_maintenance(self,x_cor,y_cor):\r\n\t\r\n\t\tfor event in pygame.event.get(): \r\n\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit()\r\n\t\t\t\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\r\n\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t#print(x_adjusted/80,y_adjusted/80)\r\n\r\n\t\t\t\tif self.selected_from_selection_bar :\r\n\t\t\t\t\t#print('inside selection bar selection option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\ttemp_game_state = CP.game_data()\r\n\t\t\t\t\ttemp_game_state = copy.deepcopy(self.game_state)\r\n\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,Helping_Class.selection_bar_reverse_mapping[self.selected_piece] 
,(x_adjusted,y_adjusted))\r\n\t\t\t\t\ttemp_game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\ttemp_game_state.active_color = not temp_game_state.active_color\r\n\t\t\t\t\tfen = temp_game_state.generate_fen()\r\n\t\t\t\t\tboard2 = chess.Board(fen=fen)\r\n\t\t\t\t\tprint(board2)\r\n\t\t\t\t\tprint(fen)\r\n\t\t\t\t\tprint('board2.is_check()',board2.is_check())\r\n\t\t\t\t\t\r\n\t\t\t\t\t#now we need to place the piece on board\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)] == None:\r\n\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\tif not board2.is_check():\r\n\t\t\t\t\t\t\tif self._check_valid_position_(x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\t\tself.place_piece_on_board_from_selection_bar(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\t#rajan's\r\n\t\t\t\t\t\t\t\t#print(self.selected_piece)\r\n\t\t\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,self.selected_position ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\t\t\t\tself.game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\t\t\t\tself.selected_piece = None\r\n\t\t\t\t\t\t\t\tself.selected_position = None\r\n\r\n\t\t\t\t\t\t\t\tself.computer_turn =True\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t#board position is filled then nothing to do\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#if his piece change selection\r\n\t\t\t\t\t\tself.selected_from_selection_bar =False\r\n\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\r\n\r\n\t\t\t\telif self.selected_from_board:\r\n\t\t\t\t\t#print('inside selection bar board option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\t\r\n\t\t\t\t\tomega = True\r\n\t\t\t\t\tif self.selected_position:\r\n\t\t\t\t\t\tif self.selected_position == (x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\tomega = False\r\n\t\t\t\t\t#print(self.selected_position,(x_adjusted,y_adjusted))\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tmove = self._check_valid_move_(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\tprint(move)\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tif move:\r\n\t\t\t\t\t\t\tself.computer_turn = True\r\n\t\t\t\t\t\t\t#if move contains x then we have update state of captured piece\r\n\t\t\t\t\t\t\t#else just update selected piece\r\n\t\t\t\t\t\t\t#print(\"correct move\")\r\n\t\t\t\t\t\t\tself.capture_piece_update_board_or_place_piece(move,x_adjusted,y_adjusted)\r\n\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\t\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t#select the 
piece\r\n\t\t\t\t\t\tif self.whose_move == 'white':\r\n\t\t\t\t\t\t\tif 'W' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\telif self.whose_move == 'black':\r\n\t\t\t\t\t\t\tif 'B' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#it is none means nothing is their so nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\r\n\t\t\t\r\n\r\n\t\t\telse:\r\n\t\t\t\t#print(\"not_pressed\")\r\n\t\t\t\tpass", "def mover_bm_izquierda(self):\n self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1]],\n [self.vertice_1[0] - self.velocidad,self.vertice_1[1]], \n [self.vertice_1[0] - 5 - 5, self.vertice_1[1]])\n self.nueva_posicion_posible_parte_inferior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1] + 1],\n [self.vertice_3[0] - self.velocidad,self.vertice_3[1]],\n [self.vertice_3[0] - 5,self.vertice_3[1]]) \n if self.nueva_posicion_posible_parte_superior[0] != 1 and self.nueva_posicion_posible_parte_inferior[0] != 1:\n self.x -= self.velocidad * (self.x >= 15)\n self.posicion = [self.x,self.posicion[1]]\n self.casilla = [self.casilla[0] - self.nueva_posicion_posible_parte_superior[1] *(self.nueva_posicion_posible_parte_inferior[0] != 1) * (self.nueva_posicion_posible_parte_superior[0] != 1), self.casilla[1]]\n self.redefinir_vertices()", "def update_cell(self, x, y, value):\n x1, y1 = self.transpose_coordinates(x, y)\n if self.is_in_field(x1, y1):\n self._cells[y1][x1] = value\n return True\n return False", "def change_cell(self):\n\n x, mu = self.update_position_direction(self.l_edge)\n mu_mean = self.calculate_mean_mu(self.x, x, self.l_edge)\n self.update_estimators(self.l_edge, mu_mean)\n\n if self.next_cell_index == self.grid.Ncells:\n # packet escapes\n self.is_escaped = True\n self.is_active = False\n self.mu = mu\n self.x = self.cell_xr\n\n elif self.next_cell_index == -1:\n\n raise GeometryException(\"No inner boundary in homogeneous sphere\")\n\n else:\n # packet is transported into target cell\n\n self.mu = mu\n\n if self.next_cell_index > self.cell_index:\n # packet is moved one cell to the right\n\n self.x = self.grid.xl[self.next_cell_index]\n\n else:\n # packet is moved one cell to the left\n\n self.x = self.grid.xr[self.next_cell_index]\n\n # reset cell-based properties for easy access\n self.cell_index = self.next_cell_index\n self.cell_chi = self.grid.chi[self.cell_index]\n self.cell_xl = self.grid.xl[self.cell_index]\n self.cell_xr = self.grid.xr[self.cell_index]\n self.cell_dx = self.grid.dx[self.cell_index]\n self.cell_dV = self.grid.dV[self.cell_index]\n\n # recalculate distances\n 
self.calculate_and_set_propagation_distances()", "def _check_event(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n sys.exit()\n elif event.key == pygame.K_SPACE:\n self.waiting = not self.waiting\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if self.waiting:\n x,y = pygame.mouse.get_pos()\n cell_addr_y = int(y/self.cell_width)\n cell_addr_x = int(x/self.cell_width)\n self.cells[cell_addr_y][cell_addr_x].update()", "def check_position():\n if self.variables.table:\n pos = self.variables.table.get_current_position()\n position_update()", "def move(self):\r\n if self.d == 'NORTH' and (self.y + 1) <= table_max_y:\r\n self.y += 1\r\n elif self.d == 'EAST' and (self.x + 1) <= table_max_x:\r\n self.x += 1\r\n elif self.d == 'SOUTH' and (self.y - 1) >= 0:\r\n self.y -= 1\r\n elif self.d == 'WEST' and (self.x - 1) >= 0:\r\n self.x -= 1\r\n else:\r\n print(\"Edge of Table Reached!\")", "def setUpBombs(self, event):\n pos = (event.widget.row * self.cols) + event.widget.col\n size = self.rows * self.cols\n \n #get a list random indexes in range to be mines\n mines = random.sample(range(size), self.numMines)\n if pos in mines:\n mines.remove(pos)\n temp = random.sample(range(size), 1)[0]\n while (temp == pos): temp = random.sample(range(size), 1)[0]\n mines.append(temp)\n \n #mark all mine squares as mines\n for mine in mines:\n targetRow = int(mine/self.cols)\n targetCol = mine % self.cols\n self.tiles[targetRow][targetCol].setMine()\n\n #calculate the number in each Square of the current game\n for row in self.tiles:\n for tile in row:\n if not tile.isMine():\n counter = 0\n for adjTile in self.getAdjacentTiles(tile.row,tile.col):\n if adjTile.isMine(): counter += 1\n tile.setCount(counter)\n \n self.minesArmed = True\n self.startTime = time.time()\n return 1", "def result(self, state, action):\n\n # blank is the index of the blank square\n blank = self.find_blank_square(state)\n new_state = list(state)\n\n delta = {'UP': -3, 'DOWN': 3, 'LEFT': -1, 'RIGHT': 1}\n neighbor = blank + delta[action]\n new_state[blank], new_state[neighbor] = new_state[neighbor], new_state[blank]\n\n return tuple(new_state)", "def _check_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n alive_neighbours = self._get_neighbours(row_number,col_number)\n \n self.to_be_updated[row_number][col_number] = False\n if self.cells[row_number][col_number].get_status():\n if alive_neighbours < 2:\n self.to_be_updated[row_number][col_number] = True\n elif alive_neighbours > 3:\n self.to_be_updated[row_number][col_number] = True\n else:\n if alive_neighbours == 3:\n self.to_be_updated[row_number][col_number] = True", "def flag_cell(self, event):\n if self.mineboard.gamestate is None:\n x = (event.x-2) // CELLWIDTH\n y = (event.y-2) // CELLWIDTH\n self.mineboard.flag_cell(y, x)\n self.update_cells()\n mines_rem = self.mineboard.minecount - self.mineboard.flagcount\n # updates the mines_left label\n if mines_rem == 1:\n self.mines_left.set(f\"{mines_rem} mine left\")\n else:\n self.mines_left.set(f\"{mines_rem} mines left\")", "def setBusy(self, x, y):\n changes = []\n for i in range(self.numPieces):\n new_x = x + self.pos[self.rotation][i][0]\n new_y = y + self.pos[self.rotation][i][1]\n changes.append((new_x, new_y))\n self.gridBusy[new_x][new_y] = self.onBoard\n self.correctPending()\n return changes", "def update(self, new_state):\n\n if self.update_animation:\n 
self.canvas.delete(self.agent)\n row, col = new_state\n x1 = col * self.GRID_ROW_HEIGHT\n y1 = row * self.GRID_ROW_HEIGHT\n self.agent = self.canvas.create_image(x1 + self.GRID_ROW_HEIGHT / 2, y1 + self.GRID_ROW_HEIGHT / 2,\n image=self.penguin)", "def update_state_v1(self, dbsession, state):\n moving_player = self.active_player()\n board = self.update(state)\n table_game = dbsession.query(TableGame).filter(\n TableGame.game == board.id).first()\n table_board = TableBoard(\n board_state=dumps(tuple(map(tuple, board.board))),\n move_num=board._board.move_count,\n player=board.active_player(),\n game=board.id)\n if table_game: # TODO(grandquista)\n table_board.game_link.append(table_game)\n dbsession.add(table_board)\n if board:\n board.poke_player(False)\n return {'end': False}\n board.poke_player(True, moving_player)\n if board._board.has_kings():\n table_game.one_won = False\n table_game.two_won = False\n elif moving_player == table_game.player_one:\n table_game.two_won = False\n else:\n table_game.one_won = False\n board.close()\n return {'end': True}", "def in_cell(self):\n for player in self.players:\n for cell in self.cell_lst:\n if player.x in cell[0] and player.y in cell[1]:\n player.current_cell = cell\n break", "def select_small_cell(self, event):\n row = (self.small_yoffset - event.y) / self.small.height\n row = fmsgeom.Small.nrows() - 1 - row\n if event.x > self.xoffset:\n column = (event.x - self.xoffset) / self.small.width\n else:\n column = (self.xoffset - event.x) / self.small.width\n detector = NORTH_SMALL\n if event.x > self.xoffset:\n detector = SOUTH_SMALL\n cell = self.detectors[detector].get_cell(row, column)\n if cell is not None:\n d = dialog.CellDialog(self.tkroot,\n self.dialog_title(cell), cell)\n # Update the overall programme modified state.\n self.modified = d.modified or self.modified\n self.canvas.focus_set()", "def _update_farness_map(self,ind):", "def state_update():\n if self.variables.table:\n if (\n self.variables.default_values_dict[\"settings\"][\"Table_state\"]\n and not table_indicator.text() == \"UP\"\n ):\n table_indicator.setStyleSheet(\n \"background : rgb(0,255,0); border-radius: 25px\"\n )\n table_indicator.setText(\"UP\")\n\n elif (\n not self.variables.default_values_dict[\"settings\"][\"Table_state\"]\n and not table_indicator.text() == \"DOWN\"\n ):\n table_indicator.setStyleSheet(\n \"background : rgb(255,0,0); border-radius: 25px\"\n )\n table_indicator.setText(\"DOWN\")", "def flagCell(self, row, col):\n self.flagged[row, col] = 1", "def update_grid(self):\n if self.game_over:\n return\n if self.active_piece is None:\n self.place_new_piece()\n if self.piece_collision_exists(self.active_piece):\n self.handle_active_piece_collision()\n self.place_new_piece()\n self.shift_cells(self.active_piece, self.current_direction)\n self.active_piece = TransformPiece.shift_coordinates(self.active_piece, self.current_direction)\n self.merge_with_completed_rows()\n if self.is_game_won():\n self.game_over = True", "def edit_current_cell(self):\n cells = self.csv_data_table.selectionModel().selectedIndexes()\n if len(cells) == 1:\n for cell in sorted(cells):\n r = cell.row()\n c = cell.column()\n self.csv_data_table.editItem(self.csv_data_table.item(r, c))", "def swap_states(matrix, floor, xaxis, rooms):\n for room in rooms:\n try:\n column = room\n row = 1\n while column > xaxis:\n column -= xaxis\n row += 1\n\n row -=1\n column -=1\n\n if column == -1:\n # catches a case when the user enters 0 as a room number.\n # would originally toggle the last 
element on the first row.\n continue\n\n if \"Y\" in matrix[floor][row][column]:\n matrix[floor][row][column] = matrix[floor][row][column].replace(\"Y\", \"\")\n else:\n matrix[floor][row][column] = \"Y\" + matrix[floor][row][column]\n # Row and column now contain the coordinates of the room.\n except IndexError:\n print()\n print(room,\"isn't one of the room numbers.\")\n print(\"Press enter keep working.\")\n input()\n return matrix", "def dirty_square(self, row: int, column: int) -> None:\n self.__squares[row][column] = Floor._dirty", "def upload_point(x, y, label=\"\"):\n\n conn = None\n cur = None\n\n try:\n # check the point is inside the usa, both point and states must be WGS84\n conn = utils.pgconnect(**settings.DEFAULT_CONNECTION)\n cur = conn.cursor()\n #if the point is inside this will return (True,) otherwise None\n cur.execute(\"\"\"select result from\n (select st_contains(s.geom,ST_GeomFromText('POINT(%s %s)', 4326)) as result \n from %s as s) as subquery\n where result is true\"\"\",(AsIs(x),AsIs(y), AsIs(settings.STATES_TABLE_NAME)))\n\n result = cur.fetchone()\n #print(result)\n\n if result: # if result is not None\n\n #check numbers size, crop to 4 digits, define the marker size\n\n # size symbol\n size=None\n\n # store number of decimal digits\n lx = 0\n ly = 0\n\n # convert numbers to string\n #x = str(x);y = str(y)\n\n if ',' in x or ',' in y:\n raise Exception(\"decimal numbers should not contain ','\")\n\n # check the number of decimal digits and crop to 4\n if '.' in x: # do only for float number\n lx = len(x.split('.')[1]) # get decimals\n if lx > 4: # crop size to 4\n x = x[:(4 - lx)]\n lx = 4\n if '.' in y: # do only for float number\n ly = len(y.split('.')[1])\n if ly > 4:\n y = y[:(4 - ly)]\n ly = 4\n\n # select a symbol size according\n # for the size take the bigger number of digits of the two numbers\n ndigits = max([lx, ly])\n if ndigits == 0:\n size = 5\n elif ndigits == 1:\n size = 4\n elif ndigits == 2:\n size = 3\n elif ndigits == 3:\n size = 2\n elif ndigits == 4:\n size = 1\n\n #upload to database\n cur.execute(\n \"\"\"INSERT INTO %s(lat,lon,label,size) VALUES (%s,%s,%s,%s) RETURNING id\"\"\",\n ( AsIs(settings.BOOKMARKS_TABLE_NAME), y, x, label, size))\n #id = cur.fetchone()[0]\n #print(id)\n cur.execute(\"\"\"UPDATE %s SET geom = ST_PointFromText('POINT(' || lon || ' ' || lat || ')', 4326)\"\"\", (AsIs(settings.BOOKMARKS_TABLE_NAME),))\n conn.commit()\n\n else:\n raise Exception(\"the point is not inside USA\")\n\n except Exception as e:\n raise Exception(e)\n\n else:\n return x, y, size #return the cropped coordinates and marker size\n\n finally:\n if cur: cur = None\n if conn: conn = None", "def set(self, y, x):\n\tif x<0 or self.X<=x or y<0 or self.Y<=y:\n\t raise ValueError, \"Coordinates out of range %i,%i\"% (y,x)\n\tself.state[x,y] = 1", "def update_cells(self, state):\n width = WIDTH / CELL_SIZE\n height = HEIGHT / CELL_SIZE\n\n for index in range(0, width * height):\n if state[index] != self.get_state(index):\n self.toggle_color(index)", "def handle_event(self, coord_x, coord_y):\n j, i = utl.pixel_coords_to_pos(coord_x, coord_y, self.maze_size)\n if self.level.in_maze(i, j):\n if self.level.maze[i][j] == ' ':\n self.goto(coord_x, coord_y)\n elif self.level.maze[i][j] == 'P':\n self.goto(coord_x, coord_y)\n elif self.level.maze[i][j] == 'E':\n self.goto(coord_x, coord_y)\n self.end_game()\n elif self.level.maze[i][j] == 'T':\n self.score += 1\n self.goto(coord_x, coord_y)\n self.level.maze[i][j] = ' '\n self.treasures[(coord_x, 
coord_y)].destroy()", "def __init__(self, row=None, column=None, mark=None):\n self.row = row\n self.column = column\n self.occupied = False\n self.mark = mark\n self.hit_count = 0\n self.ship = None", "def test_location_to_state():\n for num_rows in [12, 10]:\n for num_cols in [15, 9]:\n env = Four_Rooms_Environment(grid_width=num_cols, grid_height=num_rows)\n observed_states = set()\n for row in range(num_rows):\n for col in range(num_cols):\n state = env.location_to_state((row, col))\n assert state not in observed_states\n observed_states.add(state)", "def test_set_cell_south(mock_amg):\n\n # change the neighbour to the south.\n # this is not the correct neighbour\n mock_amg.cells[4].south = mock_amg.cells[2]\n assert mock_amg.cells[4].south == mock_amg.cells[2]", "def transition_to(self, state: cell_state):\n self._state = state\n self._state.cell = self", "def create_objects(cls, table):\n x = 2\n state = State(table[1][4])\n while x < len(table):\n line = table[x]\n if line[5] == \"powiat\" or line[5] == \"miasto na prawach powiatu\":\n county = County(line[4], line[1])\n state.in_state(county)\n elif line[5] == \"miasto\":\n city = City(line[4], line[1], line[2])\n state.in_state(city)\n elif line[5] == \"gmina miejska\":\n city_community = City_Community(line[4], line[1], line[2])\n state.in_state(city_community)\n elif line[5] == \"gmina wiejska\":\n village_community = Village_Community(line[4], line[1], line[2])\n state.in_state(village_community)\n elif line[5] == \"gmina miejsko-wiejska\":\n city_village_community = City_Village_Community(line[4], line[1], line[2])\n state.in_state(city_village_community)\n elif line[5] == \"obszar wiejski\":\n village_square = Village_square(line[4], line[1], line[2])\n state.in_state(village_square)\n elif line[5] == \"delegatura\":\n delagacy = Delegacy(line[4], line[1], line[2])\n state.in_state(delagacy)\n x+=1\n\n for county in state.in_s:#adding community objects to a proper county\n if type(county) == County:\n for community in state.in_s:\n if community.county_number == county.county_number and type(community) != County:\n county.in_county(community)\n\n return state", "def click(self, event):\n x = self.ptgrid(event.x)\n y = self.ptgrid(event.y)\n \n # x = loc[0]\n # y = loc[1]\n\n # if self.gamestate == self.STATE_TITLE_SCREEN:\n # self.new_board()\n # self.gamestate = FIRST_PLAYER\n\n\n #duplication /!\\\n if (self.board[y][x] == self.EMPTY and self.p2pGame.isReady):\n if(self.p2pGame.playerTurn == 'X' and self.player == 1):\n self.new_move(x, y, self.player)\n\n if self.has_won(self.player):\n self.gamestate = self.STATE_GAME_OVER\n if self.player == 1:\n self.gameover_screen('X Gagne')\n data = \"--W:X\"\n else:\n self.gameover_screen('O Gagne')\n data = \"--W:O\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n\n elif self.is_a_draw():\n self.gamestate = self.STATE_GAME_OVER\n self.gameover_screen('Egalité')\n data = \"--D\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n else:\n data = \"--X:\"+ str(x) + \":\" + str(y)\n self.p2pGame.playerTurn = 'O'\n self.p2pGame.sendTicTacToeData(text=data)\n # self.gamestate = self.STATE_O_TURN\n #self.launch()\n elif(self.p2pGame.playerTurn == 'O' and self.player == 2):\n self.new_move(x, y, self.player)\n\n if self.has_won(self.player):\n self.gamestate = self.STATE_GAME_OVER\n if self.player == 1:\n self.gameover_screen('X Gagne')\n data = \"--W:X\"\n else:\n self.gameover_screen('O Gagne')\n data = \"--W:O\"\n 
self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n\n elif self.is_a_draw():\n self.gamestate = self.STATE_GAME_OVER\n self.gameover_screen('Egalité')\n data = \"--D\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n else:\n data = \"--O:\"+ str(x) + \":\" + str(y)\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n # self.gamestate = self.STATE_O_TURN\n #self.launch()\n elif self.gamestate == self.STATE_GAME_OVER:\n #reset\n self.new_board()\n self.gamestate = self.FIRST_PLAYER\n self.p2pGame.sendPlayAgain(\"--A\")", "def reveal_cell(self, event):\n x = (event.x-2) // CELLWIDTH\n y = (event.y-2) // CELLWIDTH\n if self.mineboard.gamestate is None:\n self.mineboard.reveal_cell(y, x)\n self.update_cells()", "def place_obj(self):\r\n for pos in BOARD_POSITIONS:\r\n self.board[pos[0]][pos[1]] = Stone(color=self.state[pos[0]][pos[1]], pos=(pos[0], pos[1]))\r\n self.board[pos[0]][pos[1]].liberty = self.board[pos[0]][pos[1]].compute_liberty(self.state)", "def init(self, windowsize:tuple):\r\n y_count, x_count = 3, 0 #< Set the starting counter for the look_up_table. y starts with three because the first three lines are just Nones\r\n # Creating the constant maze \r\n maze_size = windowsize[0], windowsize[1] - 2 * self.grid_size\r\n self.maze = pg.Surface(maze_size) \r\n \r\n \r\n \r\n # Draw the outermost rectangles on self.maze\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((0, 3 * self.grid_size), (28 * self.grid_size, 31 * self.grid_size)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((0 + self.grid_size // 2, 3 * self.grid_size + self.grid_size // 2),(27 * self.grid_size, 30 * self.grid_size)), 4) \r\n # Draw the inner rectangles\r\n for y in self.look_up_table[3 : -2]: #< y is a list of one row from the maze\r\n for x in y: #< x is a string that is decoded as already explained\r\n pos = [self.grid_size * x_count, self.grid_size * y_count]\r\n # Set reference position in the middle of one square\r\n pos[0] += self.grid_size // 2\r\n pos[1] += self.grid_size // 2\r\n x_count += 1\r\n # Check if x is rectangle\r\n if x != None and x[0] == 'r':\r\n # When the size of the string is equal or greater than 4 it's rectangle with a specific size and not just a border.\r\n if len(x) >= 4:\r\n # get the x and y size of the rectangle. 
x will be something like 'rx1_y1' x1 resprestens the size in x direction and y1 in y direction.\r\n xy_dim = x[1:].split(\"_\") \r\n xy_dim[0] = int(xy_dim[0])\r\n xy_dim[1] = int(xy_dim[1])\r\n rect = tuple(pos), (xy_dim[0] * self.grid_size , xy_dim[1] * self.grid_size )\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], rect, self.width)\r\n # If the last char is a w (white), u (up) or l (left) a line gets draw one a specific position \r\n if x[-1] == 'w':\r\n self.draw_line(self.maze, 'u', (x_count,y_count), True)\r\n if x[-1] == 'u' or x[-1] == 'l':\r\n if x_count == 0:\r\n self.draw_line(self.maze, x[-1], (len(y), y_count))\r\n else:\r\n self.draw_line(self.maze, x[-1], (x_count, y_count))\r\n \r\n y_count += 1\r\n x_count = 0\r\n # Just some cosmetic drawing\r\n pg.draw.rect(self.maze, Colors.colors['BLACK'], ((0, 12 * self.grid_size + self.grid_size // 2 + 4), (self.grid_size // 2 + 1, 10 * self.grid_size - 4)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLACK'], ((28 * self.grid_size - self.grid_size // 2 - 1, 12 * self.grid_size + self.grid_size // 2 + 4), (self.grid_size // 2 + 1, 10 * self.grid_size - 4)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((-self.width, 13 * self.grid_size), (5 * self.grid_size, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((-self.width, 19 * self.grid_size), (5 * self.grid_size, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((23 * self.grid_size, 13 * self.grid_size), (5 * self.grid_size + 10, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((23 * self.grid_size, 19 * self.grid_size), (5 * self.grid_size + 10, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((11 * self.grid_size, 16 * self.grid_size), (6 * self.grid_size, 3 * self.grid_size)), self.width)\r\n \r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (0, 16 * self.grid_size + self.grid_size // 2 - 1), (self.grid_size // 2 + self.width, 16 * self.grid_size + self.grid_size // 2 - 1), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (0, 18 * self.grid_size + self.grid_size // 2), (self.grid_size // 2 + self.width, 18 * self.grid_size + self.grid_size // 2), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (self.grid_size * 28 - self.grid_size, 16 * self.grid_size + self.grid_size // 2 - 1), (self.grid_size * 28 + self.width, 16 * self.grid_size + self.grid_size // 2 - 1), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (self.grid_size * 28 - self.grid_size, 18 * self.grid_size + self.grid_size // 2), (self.grid_size * 28 + self.width, 18 * self.grid_size + self.grid_size // 2), self.width)\r\n self.is_init = True", "def switch(self, x1, y1, x2, y2):\n # both positions should not be empty\n assert (self.is_empty(x1, y1) is not True) or (self.is_empty(x2, y2) is not True)\n # x1,y1 is empty\n if self.is_empty(x1, y1):\n self.grid[y1][x1] = self.grid[y2][x2]\n self.cells[self.grid[y2][x2]].x = x1\n self.cells[self.grid[y2][x2]].y = y1\n self.grid[y2][x2] = ' '\n self.update_cost(self.grid[y1][x1])\n # x2,y2 is empty\n elif self.is_empty(x2, y2):\n self.grid[y2][x2] = self.grid[y1][x1]\n self.cells[self.grid[y1][x1]].x = x2\n self.cells[self.grid[y1][x1]].y = y2\n self.grid[y1][x1] = ' '\n self.update_cost(self.grid[y2][x2])\n else:\n n = self.grid[y2][x2]\n self.grid[y2][x2] = self.grid[y1][x1]\n self.cells[self.grid[y1][x1]].x = x2\n self.cells[self.grid[y1][x1]].y = y2\n self.grid[y1][x1] 
= n\n self.cells[n].x = x1\n self.cells[n].y = y1\n self.update_cost(self.grid[y1][x1])\n self.update_cost(self.grid[y2][x2])", "def set_state( self ):", "def _bbTableDoubleClicked(self, row, col):\n it = self.table.item(row, col).text()\n\n try:\n idx = int(it) # decimal\n bb_path = self.ba.cache.bb_paths[idx]\n\n col = QtGui.QColorDialog.getColor()\n if col.isValid():\n # IDA works with BGR (annoying)\n ida_color = misc.pyside_to_ida_color(col.name())\n\n misc.paint_basic_blocks(bb_path, ida_color)\n return\n\n except IndexError:\n # Address value (does not contain [A-F]) is interpreted as index\n return\n\n except ValueError:\n # Address value (containing [A-F]) fucks up int()\n return", "def _update_battle_position(self, new_cells=[], previous_cells=[]):\n if previous_cells:\n for previous_cell in previous_cells:\n self._battle_area.set_cell(previous_cell.get_name(), False)\n if new_cells:\n for new_cell in new_cells:\n self._battle_area.set_cell(new_cell.get_name(), self)", "def draw(self, state):\n if state is None:\n state = self.model.current_state\n for row in range(len(self.model.maze.walls)):\n self.__draw_row_division()\n print(\" {0:2d} \".format(row), end='') # Imprime número da linha\n\n for col in range(len(self.model.maze.walls[0])):\n if self.model.maze.walls[row][col] == 1:\n print(\"|XXX\", end='') # Desenha parede\n elif self.model.goal_state.get_element(Coordinate(row, col)):\n if state.player.row == row and state.player.col == col:\n print(\"|G-P\", end='') # Desenha objetivo e jogador.\n elif state.get_element(Coordinate(row, col)):\n print(\"|G-B\", end='') # Desenha objetivo e caixa.\n else:\n print(\"| G\", end='') # Desenha objetivo\n elif state.player.row == row and state.player.col == col:\n print(\"| P\", end='') # Desenha jogador\n elif state.get_element(Coordinate(row, col)):\n print(\"| B\", end='') # Desenha caixa.\n else:\n print(\"| \", end='') # Desenha vazio\n print(\"|\")\n if row == (len(self.model.maze.walls) - 1):\n self.__draw_row_division()", "def test_live_cell(self, alive_cells, alive):\n for positions in alive_cells:\n world = gol.World(3, 3)\n world.set_cell((0, 0))\n for x, y in positions:\n world.set_cell((x, y))\n world.update()\n assert world[(0, 0)] == alive", "def perform_action(self, cell_location, player):\n cell = self.get_cell(cell_location)\n if cell is not None:\n if cell.get_cell_state() == 0 and player == 1:\n cell.set_cell_state(1)\n elif cell.get_cell_state() == 0 and player == 2:\n cell.set_cell_state(2)\n else:\n raise Exception(\"Move is not available because the cell is occupied\")\n else:\n raise Exception(\"Given cell location is invalid\")", "def update_board(self):\n for x in self.board:\n for f in x:\n if f.status == 0:\n if f.name == \"conway\":\n assert type(self.population)==int\n if f.live_neighbors == 3:\n f.symbol =\"*\"\n f.status = 1\n self.population += 1\n elif f.name == \"fredkin\":\n if f.live_neighbors == 1 or f.live_neighbors == 3 :\n f.status = 1\n f.symbol = str(f.age)\n self.population += 1\n else:\n f.status = 0\n\n elif f.status == 1:\n if f.name == \"conway\":\n assert type(self.population)==int\n #assert type(f.status)== 1\n if not((f.live_neighbors == 2 or f.live_neighbors == 3)):\n f.symbol = \".\"\n f.status = 0\n else:\n self.population += 1\n elif f.name == \"fredkin\":\n if f.live_neighbors == 1 or f.live_neighbors == 3:\n f.status = 1\n f.age += 1\n if f.age <= 2:\n f.symbol = str(f.age)\n self.population += 1\n else:\n self.board.replace(f, Conway_Cell(\"*\"))\n else:\n f.status = 0\n 
f.symbol = \"-\"", "def AddUnmappedState(self, state: str, point: str) -> None:\n self._valid = False\n self._unmapped_states.append((point, state))", "def __activate(self, x: int, y: int, tree: int) -> None:\n self.__maze[x, y] = tree", "def process_shot(self, coordinate):\n row_num = ord(coordinate[:1]) - ord('A')\n column_num = int(coordinate[1:]) - 1\n target_cell = self.grid[row_num][column_num]\n result = target_cell.process_shot()\n if result == constants.KILL:\n self.destroyed_ships += 1\n return result", "def end_commit(self):\n #scn = bpy.context.scene\n #for pt in self.b_pts:\n # point_obj = bpy.data.objects.new(pt.label, None)\n # point_obj.location = pt.location\n # scn.objects.link(point_obj)\n #self.end_commit_post()\n pass", "def __get_cell_state(self, y, x):\n\t\tif 0 <= y <= self.__height - 1:\n\t\t\tif 0 <= x <= self.__width - 1:\n\t\t\t\treturn self.__board[y][x]\n\t\treturn 0", "def mover_bm_derecha(self):\n self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] + 1,\n self.casilla[1]],\n [self.vertice_2[0] + self.velocidad ,\n self.vertice_2[1]],\n [self.vertice_1[0] + 5, self.vertice_1[1]])\n self.nueva_posicion_posible_parte_inferior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] + 1,\n self.casilla[1] + 1],\n [self.vertice_4[0] + self.velocidad,\n self.vertice_4[1]],\n self.vertice_1)\n if self.nueva_posicion_posible_parte_superior[0] != 1 and self.nueva_posicion_posible_parte_inferior[0] != 1:\n self.x += self.velocidad * (self.x <= 655)\n self.posicion = [self.x,self.posicion[1]]\n self.casilla = [self.casilla[0] + self.nueva_posicion_posible_parte_superior[1], self.casilla[1]]\n self.redefinir_vertices()", "def mark_mine(self, cell):\n #if the cell is in the list of available cells, else do nothing\n if cell in self.cells:\n # identify the cell as a bomb, remove it from the list of existing bomb locations\n # no error message using discard method\n self.cells.discard(cell)\n self.count -= 1", "def __init__(self, height, width, mines):\n self.x = int(width)\n self.y = int(height)\n self.table_state = [\n ['-' for i in xrange(0, self.x)] for j in xrange(0, self.y)]\n self.mine_locations = self.generate_mines(int(mines))\n self.final_table = self.generate_answer()", "def updateJobsTable(self):\n self.checkJobsDict()\n jobdict = self.DB.meta.peatsa_jobs \n M = TableModel()\n #open job log from file\n f=open('jobstates.log','r')\n jl = pickle.load(f) \n for j in jobdict: \n jobid = jobdict[j] \n try:\n M.addRecord(j,state=jl[jobid]['State'],date=jl[jobid]['Date'])\n except:\n M.addRecord(j,state='Not in DB')\n self.jobstable = TableCanvas(self.tf, model=M, height=100, editable=False)\n self.jobstable.createTableFrame() \n self.log.yview('moveto', 1)\n f.close()\n return", "def result(self, state, action):\n \n worker = state[0]\n boxes = state[1]\n move = action[1]\n coord = action[0] \n newBoxes = []\n \n worker = coord\n \n for box in boxes:\n if box == coord:\n newBox = move_coords(box, move)\n newBoxes.append(newBox)\n else:\n newBoxes.append(box)\n \n newState = ((worker), tuple(newBoxes))\n return newState", "def m_move1(state,b1,b2):\n if b2 != 'hand' and not state.holding['hand']:\n if b2 == 'table':\n return [('clear',b1,True), ('pos', b1, 'hand'), ('pos', b1, b2)]\n else:\n return [('clear',b2,True), ('clear',b1,True), \\\n ('pos', b1, 'hand'), ('pos', b1, b2)]", "def _table_selected(self):\n selection_index = self._lb_tables.GetSelection()\n if selection_index != -1:\n table_id = 
self._tables[selection_index][0]\n\n #update table column selection\n columns_indexes = [tup[0] for tup in self._datafile.query(sciplot.database.Query(\"SELECT VariableID FROM TableColumn WHERE TableID = (?);\", [table_id], 1))[0]]\n new_checked_items = []\n column_ids = [tup[0] for tup in self._columns]\n\n for variable_id in columns_indexes:\n new_checked_items.append(column_ids.index(variable_id))\n\n self._ckl_columns.SetCheckedItems(new_checked_items)\n\n #update displayed table data\n self.refresh_table()", "def _update_board(self):\n\n self.game_board.update_board(self.tetrino_set)", "def setState(self, pointDict):\n self.validatePointDict(pointDict)\n state = []\n for indVar in self.indVars:\n assert indVar in pointDict,\\\n \"You have not specified an required independent variable in pointDict!\"\n state.append( pointDict[indVar] )\n self.physicalState = tuple(state)", "def make(self,state_board):\n\t\tstate_board[self.column][self.line] = self.couleur #place the piece\n\t\tdrawPiece((self.column,self.line),self.couleur) #draws it on the board\n\t\tfor pos in self.flips: #flips all the pieces in flips\n\t\t\tstate_board[pos[0]][pos[1]] = self.couleur\n\t\t\tdrawPiece(pos,self.couleur) #draws it on the board", "def change_board(self, merged_list, coordinate, direction):\n initial_y = coordinate[0]\n initial_x = coordinate[1]\n if direction == UP or direction == DOWN:\n\n #THE ISSUE IS HERE???\n for index in range(self.grid_width):\n self.board[initial_y][coordinate[1]] = merged_list[index]\n initial_y += OFFSETS[direction][0]\n else:\n for index in range(self.grid_height):\n self.board[coordinate[0]][initial_x] = merged_list[index]\n initial_x += OFFSETS[direction][1]", "def row_delete(active, objects, state):\n for i in range(10):\n if state[i] == 10:\n temp = []\n for obj in objects:\n if obj.pos_y > SIZE[1]-active.height-i*20:\n temp.append(obj)\n else:\n if obj.pos_y < SIZE[1]-active.height-i*20:\n obj.pos_y += 20\n temp.append(obj)\n objects = temp\n for j in range(9-i):\n state[i+j] = state[i+j+1]\n state[9] = 0\n return objects", "def __init__(self):\n \"\"\" action_ space : pick up location , Drop location\n state_space : location , time (hours) , day\n state_init : random pick from the state_space \"\"\"\n self.action_space = [(i,j) for i in range(m) for j in range(m) if i!=j or i==0]\n # Total states (Xi Tj Dk)\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n # random Initialize of state (location, hours, day)\n self.state_init = random.choice(self.state_space)\n # Start the first round\n self.reset()", "def successors(state):\n free_coordinates = []\n for i in range(3):\n for j in range(3):\n if state[i][j] == '_':\n free_coordinates.append([i, j])\n\n return free_coordinates", "def placement(self,event):\r\n x,y,ship=event.x,event.y,False\r\n [xmin,ymin,xmax,ymax] = self.can.coords(self.hitbox[self.select])\r\n k=2\r\n if self.select==1 or self.select==2 or self.select==0:k=1\r\n axe,a,b=1,0*k,-1*k\r\n if xmax-xmin == 46:axe,a,b=0*k,-1*k,0\r\n x,y=(x-20)//46,(y-96)//46\r\n if self.select!=-1:\r\n ship=self.game.j1.replace_ship(x+b,y+a,self.select,axe)\r\n if 0<=x<=11 and 0<=y<=11:\r\n self.game.j1.main_ship(x+b,y+a,self.select,axe)\r\n self.game.j1.affichage()", "def toggle_cell_at_point(self,x,**kw):\n c=self.delete_cell_at_point(x)\n if c is None:\n c=self.add_cell_at_point(x,**kw)\n return c", "def move_point_rect(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = 
random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n old_points = list(mutated_genome[index][2])\n old_points[random.randint(0,1)] = point\n mutated_genome[index][2] = tuple(old_points)", "def setEditCursor(self, event):\n self.editMode = True\n self.updateCursor(\"X_cursor\")\n self.changeColor(self.lastChanged, self.colors['pentomino'])\n x = (event.y - self.margin) // self.cellSize\n y = (event.x - self.margin) // self.cellSize\n if not (0 <= x < self.rows and 0 <= y < self.cols):\n return\n if not self.gridBusy[x][y]:\n return\n assert len(self.history) >= self.gridBusy[x][y]\n self.lastChanged = self.gridBusy[x][y]\n self.changeColor(self.lastChanged, self.colors['pent_edit'])", "def test_dead_cell(self, alive_cells, alive):\n for positions in alive_cells:\n world = gol.World(3, 3)\n for x, y in positions:\n world.set_cell((x, y))\n world.update()\n assert world[(0, 0)] == alive", "def mover_bm_abajo(self):\n self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0],\n self.casilla[1] + 1],\n [self.vertice_3[0], self.vertice_3[1] + self.velocidad],\n [self.vertice_1[0], self.vertice_1[1]+ 5])\n self.nueva_posicion_posible_parte_inferior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] + 1,\n self.casilla[1] + 1],\n [self.vertice_4[0],self.vertice_4[1] + self.velocidad],\n self.vertice_1)\n if self.nueva_posicion_posible_parte_superior[0] != 1 and self.nueva_posicion_posible_parte_inferior[0] != 1:\n self.y += self.velocidad * (self.y <= 655)\n self.posicion = [self.posicion[0],self.y]\n self.casilla = [self.casilla[0], self.casilla[1] + self.nueva_posicion_posible_parte_superior[1]]\n self.redefinir_vertices()", "def special_open_neighbours(self, y, x):\n if self.table_state[y][x] != \"-\" and self.table_state[y][x] == self.flags_nearby(y, x):\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n if xe >= self.x or ye >= self.y: # do not open out of bounds\n continue\n # if it is a bomb but not flagged\n if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG:\n self.show_answer_board([ye, xe])\n print \"KABOOM!\"\n return Minesweeper.IS_A_BOMB\n self.open_neighbours(y, x)\n self.print_table(self.table_state)\n return Minesweeper.NOT_A_BOMB", "def get_coordinates(table, replace_columns=False, remove_nans=False):\n assert \"zip code\" in table.labels or ((\"city\" in table.labels or \"county\" in table.labels) and \"state\" in table.labels)\n ref = Table.read_table(pkg_resources.resource_filename(__name__, \"geodata/geocode_states.csv\"))\n\n index_name = \"\".join(table.labels) # Ensures that index can't possibly be one of the preexisting columns\n index_name += \" \"\n \n table = table.with_columns(index_name, np.arange(table.num_rows))\n lat = np.array([np.nan] * table.num_rows)\n lon = np.array([np.nan] * table.num_rows)\n unassigned = set(range(table.num_rows)) \n while len(unassigned) > 0:\n index = unassigned.pop()\n row = table.take(index).take(0)\n if \"zip code\" in table.labels:\n select = table.where(\"zip code\", row[\"zip code\"][0]).column(index_name)\n unassigned -= set(select)\n try:\n ref_lat, ref_lon = ref.where(\"zip\", int(row[\"zip code\"][0])).select(\"lat\", \"lon\").row(0)\n lat[select] = ref_lat\n lon[select] = ref_lon\n except IndexError:\n pass\n else:\n state_select = table.where(\"state\", row[\"state\"][0]).column(index_name)\n county_select = 
table.where(\"county\", row[\"county\"][0]).column(index_name) if \"county\" in table.labels else np.arange(table.num_rows)\n city_select = table.where(\"city\", row[\"city\"][0]).column(index_name) if \"city\" in table.labels else np.arange(table.num_rows)\n select = set.intersection(set(state_select), set(county_select), set(city_select))\n unassigned -= select\n select = list(select)\n try:\n matched_ref = ref.where(\"state\", row[\"state\"][0])\n if \"county\" in table.labels:\n matched_ref = matched_ref.where(\"county\", row[\"county\"][0].lower())\n if \"city\" in table.labels:\n matched_ref = matched_ref.where(\"city\", row[\"city\"][0].lower())\n ref_lat, ref_lon = matched_ref.select(\"lat\", \"lon\").row(0)\n lat[select] = ref_lat\n lon[select] = ref_lon\n except IndexError:\n pass\n table = table.with_columns(\"lat\", lat, \"lon\", lon)\n table = table.drop(index_name)\n if replace_columns:\n for label in [\"county\", \"city\", \"zip code\", \"state\"]:\n try:\n table = table.drop(label)\n except KeyError:\n pass\n if remove_nans: \n table = table.where(\"lat\", are.below(float(\"inf\"))) # NaNs are not considered to be smaller than infinity\n return table", "def updateQTable( self, reward, current_state ):", "def update_to_coord(self, point):\r\n if self._index_of_sel_point != -1 and self._index_of_sel_point <= len(self.points)-1:\r\n self._command_stack.do(model.structure.UpdatePoint(\r\n self._structure, self._index_of_sel_point, round(point[0]), round(point[1])))\r\n elif self._index_of_sel_point == len(self.points) or not self.points:\r\n self._command_stack.do(model.structure.AddPoint(\r\n self._structure, self._index_of_sel_point+1, round(point[0]), round(point[1])))\r\n if self._index_of_sel_point+1 >= len(self.points):\r\n self.winfo_toplevel().update()\r\n self._index_of_sel_point = len(self.points)\r\n else:\r\n self._set_selection(self._index_of_sel_point+1)\r\n self.winfo_toplevel().update()", "def fillIn(self):\n\n # Grabs first point (which is a shore) and prefills in hashes\n toBeAnalyzed = [self.points[0]]\n islandHash = defaultdict(list)\n islandHash[toBeAnalyzed[0].x].append(toBeAnalyzed[0].x)\n islandGridPoints = toBeAnalyzed[:]\n\n # Find all points not at pond-level.\n while toBeAnalyzed:\n gridPoint = toBeAnalyzed.pop()\n neighbors = self.analyzeData.iterateDiagonal(gridPoint.x,\n gridPoint.y)\n for _x, _y, elevation in neighbors:\n\n if elevation != self.pondElevation and _y not in\\\n islandHash[_x]:\n branch = GridPoint(_x, _y, elevation)\n islandHash[_x].append(_y)\n toBeAnalyzed.append(branch)\n islandGridPoints.append(branch)\n self.points = islandGridPoints", "def __init__(self):\n self._board = [\n\n ['', '', '', \"x\", '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n [\"o\", '', \"o\", '', \"o\", '', \"o\", ''],\n ]\n self._game_state = \"UNFINISHED\" # default game state\n self._current_row = 0 #helper used to enforce moving one row at a time\n self._current_x_row = 0 # tracks x's row coordinate\n self._current_x_column = 3 # tracks x's column coordinate\n\n #four coordinates tracking the available diagonal spaces of x\n self._lower_right = (self._current_x_row + 1, self._current_x_column + 1)\n self._lower_left = (self._current_x_row + 1, self._current_x_column - 1)\n self._upper_right = (self._current_x_row - 1, self._current_x_column + 1)\n self._upper_left = 
(self._current_x_row - 1, self._current_x_column - 1)\n\n #helper used to check if x is in the first column\n self._row1 = (\n self._board[0][0],\n self._board[1][0],\n self._board[2][0],\n self._board[3][0],\n self._board[4][0],\n self._board[5][0],\n self._board[6][0],\n self._board[7][0])\n #helper used to check if x is in the last column\n self._row7 = (\n self._board[0][7],\n self._board[1][7],\n self._board[2][7],\n self._board[3][7],\n self._board[4][7],\n self._board[5][7],\n self._board[6][7],\n self._board[7][7])", "def is_dirty(self, row: int, column: int) -> int:\n return 1 if self.__squares[row][column] == Floor._dirty else 0", "def convert_state(self, x, v):\n \n\n #print(self.offset[0] * self.tiling_displacement[len(self.tiling_displacement)-1][0] / self.tile_width[0])\n\n #state = 0\n n_features = self.total_tiles[0] * self.total_tiles[1] * self.n_tilings\n state = np.zeros(n_features, dtype=int)\n print(np.shape(state))\n\n for i in range(self.n_tilings):\n # Finds the index of the tile in both dimensions\n x_tile = (x - self.offset[0] * self.tiling_displacement[i][0] - self.x_range[0] + self.extra_tiles[0] * self.tile_width[0]) // self.tile_width[0]\n v_tile = (v - self.offset[1] * self.tiling_displacement[i][1] - self.v_range[0] + self.extra_tiles[1] * self.tile_width[1]) // self.tile_width[1]\n \n #x_tile = (x - self.offset[0] * self.tiling_displacement[i][0] - self.x_range[0] + self.extra_tiles[0] * self.tile_width[0]) // self.tile_width[0]\n #v_tile = (v - self.offset[1] * self.tiling_displacement[i][1] - self.v_range[0] + self.extra_tiles[1] * self.tile_width[1]) // self.tile_width[1]\n\n index = int(i * (self.total_tiles[0]*self.total_tiles[1]) + x_tile * self.total_tiles[0] + v_tile)\n print(\"INDEX\" , index)\n state[index] = 1\n\n\n\n\n \"\"\"\n # adds the correct bit (corresponding to the state of the tiling) to the state integer\n state += 2 ** (i * self.n_tiles**2 + x_tile * self.n_tiles + v_tile)\n \"\"\"\n print (\"Tiling %s: (%s,%s)\" % (i, x_tile, v_tile))\n\n return state", "def update_board(self, move):\n #new_move equals the gird with selection(Which is the players input)\n new_move = self.grid[move]\n\n # check if column selected by player is full if the first index (top) has a game piece\n if new_move[0] != \" \" :\n return True\n\n # this will get the correct column and add the player's move\n # subtract player column selection by 1 to select correct column\n adjustment = -1\n while new_move[adjustment] != \" \":\n adjustment -= 1\n\n # update the grid with the selected column by the player\n new_move[adjustment] = self.playing_player[1]\n return False", "def set_cell(state: State) -> State:\n assert state.index < state.array_len\n return state._replace(\n array=state.array[: state.index] + [state.acc] + state.array[state.index + 1 :]\n )", "def update1(self):\r\n tmp = [row.copy() for row in self.grid]\r\n changed = False\r\n for y in range(self.height):\r\n for x in range(self.width):\r\n if self.grid[y][x] == '#' and 5 <= sum(\r\n self.is_occupied((x + i, y + j)) for i in [-1, 0, 1] for j in [-1, 0, 1]):\r\n # >= 5, because we also count (x,y)\r\n tmp[y][x] = 'L'\r\n changed = True\r\n elif self.grid[y][x] == 'L' and self.is_available(x, y):\r\n tmp[y][x] = '#'\r\n changed = True\r\n else:\r\n tmp[y][x] = self.grid[y][x]\r\n self.grid = tmp\r\n return changed", "def cleanTileAtPosition(self, pos):\n #Return the floor of x as a float, the largest integer value less than\n #or equal to x\n posx = pos.getX()\n posy = pos.getY()\n posx = 
math.floor(posx)\n posy = math.floor(posy)\n self.tiles[(posx, posy)] = 1 # using 0 as dirty value, 1 as clean value, of key tuple pos(x,y)\n #self.printTiles()\n #raise NotImplementedError", "def put_cell(self, x, y, num):\n if self.is_empty(x,y):\n self.grid[y][x] = num\n return True\n return False", "def isGoalState(self, state):\n x,y = state\n\n \"*** YOUR CODE HERE ***\"\n return self.food[x][y]", "def draw_selected(self):\n if self.get_selected() is not None and not self.check_if_locked(self.get_selected()):\n self.color_cell(pos=self.get_selected(\n ), color=SELECTED_INVALID if self.get_selected() in self.invalid else SELECTED)", "def left_click(self, event):\n if self.text == 'True': # player clicks on bomb\n messagebox.showerror('Minesweeper', 'Kaboom! You lose.', parent=self)\n self.parentGrid.reveal_bombs()\n elif self.text != '0': # reveal cell\n self['text'] = self.text\n self['fg'] = self.textColor\n self['bg'] = 'light gray'\n self['relief'] = SUNKEN\n self.parentGrid.exposed_new_cell(self.coord)\n else: # reveal cell(s) if it is a blank cell\n self.parentGrid.reveal_blank_cells(self)", "def update(match):\r\n \r\n \r\n coordinates= match.board\r\n \r\n rows=len(match.board)\r\n column=len(match.board[0])\r\n for x in range(rows):\r\n for y in range(column):\r\n cell_up = match.board[wrapx(x)][wrapy(y+1)]\r\n cell_down = match.board[wrapx(x)][wrapy(y-1)]\r\n cell_right = match.board[wrapx(x+1)][wrapy(y)]\r\n cell_left = match.board[wrapx(x-1)][wrapy(y)]\r\n cell_diagupright = match.board[wrapx(x+1)][wrapy(y+1)]\r\n cell_diagupleft = match.board[wrapx(x-1)][wrapy(y+1)]\r\n cell_diagdownright = match.board[wrapx(x+1)][wrapy(y-1)] \r\n cell_diagdownleft = match.board[wrapx(x-1)][wrapy(y-1)]\r\n \r\n listofneightbours = [cell_up, cell_down, cell_right, cell_left, cell_diagupright, cell_diagupleft,\r\n cell_diagdownright, cell_diagdownleft]\r\n aliveneighbours = listofneighbours.count(1)\r\n \r\n if aliveneighbours < 2:\r\n x = 0\r\n elif aliveneighbours == 2:\r\n x = 1\r\n elif aliveneighbours == 3:\r\n x = 1\r\n else:\r\n x = 0" ]
[ "0.61604476", "0.57963514", "0.5728119", "0.5681672", "0.5650815", "0.5578061", "0.55363643", "0.55130213", "0.548478", "0.5473019", "0.54568815", "0.54379356", "0.5364507", "0.5332647", "0.53126705", "0.5300106", "0.52811605", "0.5278566", "0.52776027", "0.5275714", "0.5265053", "0.5261587", "0.5239645", "0.5238544", "0.5236494", "0.5228478", "0.5191042", "0.51905423", "0.518508", "0.51773024", "0.5166937", "0.5166466", "0.51622635", "0.51465464", "0.5146502", "0.5146255", "0.5144119", "0.51404655", "0.5137932", "0.5117369", "0.5109505", "0.51078725", "0.50959873", "0.5089756", "0.508744", "0.5080654", "0.50801367", "0.5064982", "0.5061781", "0.50575435", "0.5055827", "0.5043006", "0.5040343", "0.5039038", "0.503848", "0.5030882", "0.50281054", "0.50272286", "0.50159264", "0.50148094", "0.5009551", "0.50040764", "0.5001291", "0.49999893", "0.49859357", "0.49755222", "0.49689454", "0.49688575", "0.49655014", "0.4964466", "0.4963173", "0.4959337", "0.49566984", "0.4955797", "0.49552125", "0.49549", "0.49496585", "0.49455765", "0.49446273", "0.4943508", "0.49421173", "0.4940607", "0.4940504", "0.4939832", "0.4939696", "0.49337238", "0.49332312", "0.4930719", "0.49290335", "0.4926551", "0.49257863", "0.4919624", "0.49158534", "0.4914459", "0.49128607", "0.4911364", "0.49109393", "0.49081895", "0.4905731", "0.4905569" ]
0.6044062
1
prints the answer table with print_table.
def show_answer_board(self, coords):
    Minesweeper.print_table(self.final_table, coords)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))", "def print_table(self) -> None:\n if (self.probability_links == None):\n print(\"+--------+\")\n print(f\"| P({self.key:1s}) |\")\n print(\"+--------+\")\n print(f\"| {self.probability_values[0]:0.04f} |\")\n print(\"+--------+\")\n else:\n arg_len = 2 + len(' '.join(self.probability_links.keys()))\n param_len = 2 + \\\n max(6, len(\"P(A|)\" + \",\".join(self.probability_links.keys())))\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")\n print(\n f\"| {' '.join(self.probability_links.keys())} | P({self.key}|{','.join(self.probability_links.keys())}) |\")\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")\n for i in range(2**len(self.probability_links.keys())):\n # Gives us a string binary value to make truth table off of\n bool_key = f\"{i:0{len(self.probability_links.keys())}b}\"\n print(\n f\"| {' '.join(['T' if bool_key[j] == '0' else 'F' for j in range(len(self.probability_links.keys()))])} | {f'{self.probability_values[i]:0.04f}':<{param_len-1}s}|\")\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")", "def print_mistakes_table():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n\n cursor.execute(\"select * from mistakes\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Given Answer','User Given Answer']\n for row in data:\n table.add_row(row)\n conn.close()\n\n return table", "def print_table(self, table):\n raise NotImplementedError('print_table method not defined!')", "def __print_work_table(table):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % ('Act', 'Pred', 'Block', 'Dummy', 'Succ', 'start', 'end')\n for k, col in sorted(table.items()):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % tuple(\n [str(k)] + [list(col[0])] + [str(col[i]) for i in range(1, len(col))])", "def printqtable(self):\n\t\tout = \"PRINTING QTABLE\\n\"\n\t\tfor key in self.qtable:\n\t\t\tout += \"state: \" + str(key) + \"\\n\"\n\t\t\tfor i in range(self.game.pips+1):\n\t\t\t\tout += f\"rew{i}: {self.qtable[key][i]:.3f} \"\n\t\t\tout += \"\\n\"\n\t\treturn out", "def print_tables(self):\n print \"------------------\\nTables\\n------------------\"\n cnt = 0\n for x in self.show_tables():\n cnt += 1\n print (\"{0}.) 
{1}\".format(cnt, x[0]))", "def print_table():\n for key in _op_table.keys():\n print(key)\n for sub_key in _op_table[key]:\n print('\\t--' + sub_key)", "def print_table(hdrs, flag=False, data=[],fmt='psql'):\n\tres = cur.fetchall()\n\tif flag:\n\t\tres = data\n\tprint(tabulate(res, headers=hdrs, tablefmt=fmt))", "def print_table(table):\n for i in range(len(table)):\n print \"Row \", i, \"\\t\",\n for j in range(len(table[i])):\n print table[i][j],\n print \"\\n\"", "def print_table(table):\n for row in table:\n print(row)", "def print_table(table):\n for row in table:\n print(row)", "def print_table(table):\n for row in table:\n print(row)", "def print_table(emojis):\n if len(emojis) > 0:\n table = []\n for i in emojis:\n table.append([i.get('id'), i.get('title'), i.get('emoji')])\n print(tabulate(table, headers=[\"ID\", \"Title\", \"Emoji\"]))\n else:\n print(\"¯\\_(ツ)_/¯ Nothing to see here...\")", "def print_table(table):\n # transpose the table:\n table = map(list, zip(*table))\n # get the column width:\n col_width = [max(len(str(x)) for x in col) for col in zip(*table)]\n # print it to screen:\n print\n for line in table:\n print \"| \" + \" | \".join(\"{:{}}\".format(x, col_width[i]) for i, x in enumerate(line)) + \" |\"\n print", "def print_table(table):\n rest = table[1:]\n fmt = \"%-28s %-9s %-16s %s\"\n for row in rest:\n print(fmt % tuple(row))", "def print_report(self):\n print '=' * 20 + ' %s ' % self.label + '=' * 20\n print '%-20s%5s\\t%4s\\t%4s\\t%4s\\t%4s' % (\n 'Hand' + '=' * 16, '#', 'Frac', 'W', 'Tie', 'L')\n for hand, result_dict in self.counts.iteritems():\n total_for_hand = sum(result_dict.itervalues())\n if total_for_hand == 0:\n win_frac = 0.0\n tie_frac = 0.0\n loss_frac = 0.0\n else:\n win_frac = float(result_dict[WIN_RESULT])/total_for_hand\n tie_frac = float(result_dict[TIE_RESULT])/total_for_hand\n loss_frac = float(\n result_dict[LOSS_RESULT])/total_for_hand\n print '%-20s%5d\\t%0.3f\\t%0.3f\\t%0.3f\\t%0.3f' % (\n hand, total_for_hand, float(total_for_hand)/self.total_items,\n win_frac, tie_frac, loss_frac)", "def print_table(data):\n for key in sorted(data):\n print \"%s: %s\" % (key.rjust(16), data[key])", "def print_table(self, items, fields):\r\n formats = []\r\n borders = []\r\n for f in fields:\r\n length = max(len(f),\r\n max([len(self.string(getattr(i, f))) for i in items]))\r\n justify = '>' if isinstance(getattr(\r\n items[0], f), int) or f == 'size' or f == 'reward' else '<'\r\n formats.append('{:' + justify + self.string(length + 2) + '}')\r\n borders.append('-' * length + ' ')\r\n row_format = u''.join(formats)\r\n headers = [f + ' ' for f in fields]\r\n print(row_format.format(*headers))\r\n print(row_format.format(*borders))\r\n for i in items:\r\n i_fields = [self.string(getattr(i, f)) + ' ' for f in fields]\r\n try:\r\n print(row_format.format(*i_fields))\r\n except UnicodeEncodeError:\r\n print(row_format.format(*i_fields).encode('utf-8'))", "def show_table(self, keys=None, sort_keys_function=None):\n rows = []\n output_keys = keys or self.keys\n\n for item in self.__get_items(sort_keys_function):\n row = []\n for output_key in output_keys:\n row.append(getattr(item, self.mapping[output_key]))\n rows.append(row)\n print(tabulate(rows, output_keys))", "def show_table(table):\n # id: string\n # Unique and random generated (at least 2 special char()expect: ';'),\n # 2 number, 2 lower and 2 upper case letter)\n # title: string\n # manufacturer: string\n # price: number (dollars)\n # in_stock: number\n title_list = [\"ID\", \"Title\", 
\"Manufacturer\",\n \"Price\", \"Number in stock\"]\n ui.print_table(table, title_list)", "def print_truth_table(formula):\r\n # Task 2.5\r\n variables = list(formula.variables())\r\n sorted_variables = sorted(variables)\r\n print(\"|\", end=\"\")\r\n for variable in sorted_variables:\r\n print(\" \" + variable + \" |\", end=\"\")\r\n print(\" \" + formula.infix() + \" |\")\r\n print(\"|\", end=\"\")\r\n for variable in sorted_variables:\r\n current_variable_hyphens = \"\"\r\n for letter in range(len(variable)):\r\n current_variable_hyphens += \"-\"\r\n print(\"-\" + current_variable_hyphens + \"-|\", end=\"\")\r\n formula_hyphens = \"\"\r\n for letter in range(len(formula.infix())):\r\n formula_hyphens += \"-\"\r\n print(\"-\" + formula_hyphens + \"-|\")\r\n models = list(all_models(sorted_variables))\r\n values = truth_values(formula, models)\r\n formula_spaces = \"\"\r\n for letter in range(len(formula.infix()) - 1):\r\n formula_spaces += \" \"\r\n for model, value in zip(models, values):\r\n print(\"|\", end=\"\")\r\n for variable in sorted_variables:\r\n variable_spaces = \"\"\r\n for i in range(len(variable)):\r\n variable_spaces += \" \"\r\n if model[variable]:\r\n print(\" T\" + variable_spaces + \"|\", end=\"\")\r\n else:\r\n print(\" F\" + variable_spaces + \"|\", end=\"\")\r\n if value:\r\n print(\" T\" + formula_spaces + \" |\")\r\n else:\r\n print(\" F\" + formula_spaces + \" |\")", "def print_truth_table(formula: Formula) -> None:\r\n # Task 2.4\r\n\r\n headers = list()\r\n for var in list(formula.variables()):\r\n headers.append(var)\r\n headers = sorted(headers) # variable names sorted alphabetic\r\n table = list()\r\n result_lst = list()\r\n index = 0\r\n all_models_local = all_models(list(formula.variables()), True)\r\n for result in truth_values(formula, all_models_local):\r\n if result:\r\n result_lst.append(\"T\")\r\n else:\r\n result_lst.append(\"F\")\r\n\r\n temp_lst = list()\r\n for mod_dict in all_models_local:\r\n for var in headers:\r\n if mod_dict.get(var):\r\n temp_lst.append(\"T\")\r\n else:\r\n temp_lst.append(\"F\")\r\n\r\n # adding the content of result\r\n temp_lst.append(result_lst[index])\r\n index += 1\r\n table.append(temp_lst.copy())\r\n temp_lst.clear()\r\n\r\n # for var in list(formula.variables()):\r\n # headers.append(var)\r\n headers.append(str(formula)) # the result\r\n # headers = sorted(headers)\r\n from tabulate import tabulate\r\n print(tabulate(table, headers, tablefmt=\"orgtbl\").replace(\"+\", \"|\"))", "def print_table(headers, rows):\n try:\n if headers:\n print('\\n')\n print(tabulate.tabulate(\n rows, headers=headers,\n tablefmt=\"plain\", numalign=\"left\"\n ))\n print('\\n')\n except Exception as e:\n print(e.message)", "def print_table(response, title):\n print title + ':'\n\n if 'rows' not in response:\n print 'Empty response'\n return\n\n rows = response['rows']\n row_format = '{:<20}' + '{:>20}' * 4\n print row_format.format('Keys', 'Clicks', 'Impressions', 'CTR', 'Position')\n for row in rows:\n keys = ''\n # Keys are returned only if one or more dimensions are requested.\n if 'keys' in row:\n keys = u','.join(row['keys']).encode('utf-8')\n print row_format.format(\n keys, row['clicks'], row['impressions'], row['ctr'], row['position'])", "def print_table(table):\r\n print('/-----------------------------------------------------------------------------------\\\\')\r\n for item in table:\r\n\r\n while len(item[1]) <= 22:\r\n item[1] += ' '\r\n\r\n while len(item[2]) <= 27:\r\n item[2] += ' '\r\n\r\n while len(item[0]) <= 15:\r\n 
item[0] += ' '\r\n\r\n print('| '+item[0]+' | '+item[1]+'| '+item[2]+' |')\r\n\r\n print('\\\\-----------------------------------------------------------------------------------/')", "def print_table(rows, labels=None):\n if labels is None:\n labels = ROW_LABELS\n\n output_table = prettytable.PrettyTable()\n output_table.field_names = labels\n output_table.align = 'l'\n output_table.vrules = prettytable.prettytable.ALL\n output_table.hrules = prettytable.prettytable.HEADER\n\n for row in rows:\n row = [x.strip() for x in row]\n output_table.add_row(row)\n\n print output_table\n print ''", "def visualise_q_table(q_table):\n # extract best acts\n act_table = np.zeros((4, 4))\n str_table = []\n for row in range(4):\n str_table.append(\"\")\n for col in range(4):\n pos = row * 4 + col\n max_q = None\n max_a = None\n for a in range(4):\n q = q_table[(pos, a)]\n if max_q is None or q > max_q:\n max_q = q\n max_a = a\n act_table[row, col] = max_a\n str_table[row] += act_to_str(max_a)\n\n # print best actions in human_readable format\n print(\"\\nAction selection table:\")\n for row_str in str_table:\n print(row_str)\n print()", "def print_table(listx):\r\n\tfor lists in listx:\r\n\t\tfor i in lists:\r\n\t\t\tprint str(i) , '\\t',\r\n\t\tprint()", "def print_table(table):\n for row in table:\n # Header column left justified\n print(\"{:<19}\".format(row[0]), end='')\n # Remaining columns right justified\n for col in row[1:]:\n print(\"{:>4}\".format(col), end='')\n print(\"\", end='\\n')", "def print_table(table, fieldnames):\n print(\"{:<19}\".format(fieldnames[0]), end='')\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(field), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(row[field]), end='')\n print(\"\", end='\\n')", "def print_table(table, fieldnames):\n print(\"{:<19}\".format(fieldnames[0]), end='')\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(field), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(row[field]), end='')\n print(\"\", end='\\n')", "def drawTable(listOfWord, listOfFrequency):\r\n\r\n\tprint(\"Distribusi frekuensi kata: \")\t\t\t\t # judul di atas tabel\r\n\tprint('-' * 40)\r\n\tprint('{:3s} {:25s} {:10s}'.format('No.', 'Kata', 'Frekuensi'))\r\n\tprint('-' * 40)\r\n\r\n\tnumber = 0\t\t\t\t\t\t\t\t# penomoran poin di dalam tabel\r\n\tindexCounter = 0\t\t\t\t\t\t\t\t\t\r\n\tfor word in listOfWord:\t\t\t\t\t\t\t# mencetak isi tabel\r\n\t\tnumber += 1\r\n\t\tprint('{:3d} {:26s} {:<9d}'.format(number, word, listOfFrequency[indexCounter]))\r\n\t\tindexCounter += 1\r\n\r\n\tprint('-' * 40)", "def print_movie_table(self):\n self = self\n headers = [\"Votes\", \"Rank\", \"Year\", \"Title\"]\n self.handler.header(headers)\n\n for movie in self.movie_list:\n self.handler.row([str(movie.get_votes()), str(movie.get_rank()),\n str(movie.get_year()), str(movie.get_title())])\n\n self.handler.footer()", "def print_truth_table(formula: Formula) -> None:\n # Task 2.4\n variables = list(sorted(formula.variables()))\n assignment_dict = all_models(list(variables))\n assignment_results = list(truth_values(formula, assignment_dict))\n arr = []\n for i, assignment in enumerate(assignment_dict):\n vals = list(assignment.values())\n 
vals.append(assignment_results[i])\n vals = ['T' if i == True else 'F' for i in vals]\n arr.append(vals)\n\n variables.append(str(formula))\n table_printer(variables, arr)", "def ProbCorrectTable():\n efficacies = [3, 1.5, 0, -1.5, -3]\n difficulties = [-1.85, -0.05, 1.75]\n\n for eff in efficacies:\n print('%0.2f & ' % eff, end=' ') \n for diff in difficulties:\n p = ProbCorrect(eff, diff)\n print('%0.2f & ' % p, end=' ') \n print(r'\\\\')", "def print_tables(hash_table, f_output, l_samples):\n\n l_fields = ['chr', 'pos', 'ref', 'alt', 'QUAL', 'FILTER',\n 'Func.refGene', 'Gene.refGene', 'GeneDetail.refGene', 'ExonicFunc.refGene', 'AAChange.refGene',\n 'cytoBand', 'ExAC_ALL', 'ExAC_AFR', 'ExAC_AMR', 'ExAC_EAS', 'ExAC_FIN', 'ExAC_NFE', 'ExAC_OTH',\n 'ExAC_SAS',\n 'avsnp147', 'SIFT_score', 'SIFT_pred', 'Polyphen2_HDIV_score', 'Polyphen2_HDIV_pred',\n 'Polyphen2_HVAR_score',\n 'Polyphen2_HVAR_pred', 'LRT_score', 'LRT_pred', 'MutationTaster_score', 'MutationTaster_pred',\n 'MutationAssessor_score', 'MutationAssessor_pred', 'FATHMM_score', 'FATHMM_pred', 'PROVEAN_score',\n 'PROVEAN_pred', 'VEST3_score', 'CADD_raw', 'CADD_phred', 'DANN_score', 'fathmm-MKL_coding_score',\n 'fathmm-MKL_coding_pred', 'MetaSVM_score', 'MetaSVM_pred', 'MetaLR_score', 'MetaLR_pred',\n 'integrated_fitCons_score', 'integrated_confidence_value', 'GERP++_RS', 'phyloP7way_vertebrate',\n 'phyloP20way_mammalian', 'phastCons7way_vertebrate', 'phastCons20way_mammalian', 'SiPhy_29way_logOdds']\n l_fields = l_fields + l_samples\n \n l_chr = set([item[0] for item in hash_table.keys()])\n\n fo = open(f_output, 'w')\n fo.write(','.join(l_fields) + '\\n')\n for key in sorted(hash_table.keys(), key=itemgetter(1)):\n fo.write(','.join(map(lambda field: hash_table[key].get(field, '.'), l_fields)) + '\\n')\n fo.close()", "def print_top_answers(answers):\n print(\"Possible answers:\")\n print(\"-\" * 40)\n for res in answers:\n print(unicode(u\"{0:.2f}\\t{1}\".format(res[1], res[0])))", "def print_results(identity_list):\n inversed_indexes = {v: k for k, v in INDEXES.items()}\n highest_val = identity_list[0][0]\n highest_hand = \"A A\"\n lowest_val = highest_val\n lowest_hand = \"A A\"\n running_total = 0.0\n\n print(f\"This table contains win percentages from comparing {HANDS} hands\")\n print(f\"against each other in {SIMULATIONS} simulations\\n\")\n print(\" A K Q J T 9 8 7 6 5 4 3 2\\n\")\n for row in range(len(INDEXES)):\n print(f\"{inversed_indexes[row]} \", end=\"\")\n for col in range(len(INDEXES)):\n print(f\"{format(identity_list[row][col], '.2f')}\", end=\" \") # To two decimal places\n\n # Update highest/lowest values\n if identity_list[row][col] > highest_val:\n highest_val = identity_list[row][col]\n highest_hand = f\"{inversed_indexes[row]} {inversed_indexes[col]}\"\n if row != col:\n suited = True if col > row else False\n highest_hand += ' suited' if suited else ' off'\n\n if identity_list[row][col] < lowest_val:\n lowest_val = identity_list[row][col]\n lowest_hand = f\"{inversed_indexes[row]} {inversed_indexes[col]}\"\n if row != col:\n suited = True if col > row else False\n lowest_hand += ' suited' if suited else ' off'\n\n # Update running total\n running_total += identity_list[row][col]\n\n print(\"\\n\")\n\n print(f\"The hand with the highest win percentage was {highest_hand} \", end=\"\")\n print(f\"with {format(highest_val, '.2f')}% of hands won\")\n print(f\"The hand with the lowest win percentage was {lowest_hand} \", end=\"\")\n print(f\"with {format(lowest_val, '.2f')}% of hands won\")\n print(f\"The average 
win percentage overall was \", end=\"\")\n print(f\"{format(running_total / len(INDEXES) ** 2, '.2f')}%\")", "def display(self):\n print(\n f'\\t\\t {self.name.upper()} {self.potency[0]}{self.potency[1]}\\t\\t'\n f' {self.dose_qty[0]} {self.dose_qty[1]} {self.dose[0]} {self.dose[1].upper()}')", "def _print_table(stats):\n max_key_len = max([len(key) for key in stats])\n width_right = 15\n width_left = max(width_right, max_key_len)\n divider = '+-' + '-' * width_left + '-+-' + '-' * width_right + '-+'\n\n def get_format_char(value):\n if isinstance(value, int):\n return 'd'\n elif isinstance(value, float):\n return '.4f'\n else:\n return 's'\n\n print(divider)\n for name, value in stats.items():\n left_format = f':>{width_left}s'\n right_format = f':<{width_right}{get_format_char(value)}'\n line_format = f'| {{{left_format}}} | {{{right_format}}} |'\n line = line_format.format(name, value)\n print(line)\n print(divider)", "def print_table(self, table, connection=None):\n\n connection = connection or self.engine.connect()\n result = connection.execute(select([table]))\n print(\n '-----------------------------------------------------------'\n '\\nColumns:\\n\\t{}\\nData:\\n\\t{}\\n'\n '-----------------------------------------------------------'.format(\n table.columns, '\\n\\t'.join(str(row) for row in result)\n )\n )\n\n result.close()", "def print_output_tables(cls,\n wfns=None, file=None,\n print_intensities=True,\n print_energies=True,\n print_energy_corrections=True,\n print_transition_moments=True,\n operators=None,\n logger=None, sep_char=\"=\", sep_len=100):\n\n if logger is None:\n logger = wfns.logger\n if logger is not None:\n def print_block(label, *args, **kwargs):\n with logger.block(tag=label):\n logger.log_print(\" \".join(\"{}\".format(x) for x in args), **kwargs)\n else:\n if file is None:\n file = sys.stdout\n\n def print_label(label, file=file, **opts):\n lablen = len(label) + 2\n split_l = int(np.floor((sep_len - lablen) / 2))\n split_r = int(np.ceil((sep_len - lablen) / 2))\n print(sep_char * split_l, label, sep_char * split_r, **opts, file=file)\n\n def print_footer(label=None, file=file, **opts):\n print(sep_char * sep_len, **opts, file=file)\n\n def print_block(label, *args, file=file, **kwargs):\n print_label(label, file=file, **kwargs)\n print(*args, file=file, **kwargs)\n print_footer(file=file, **kwargs)\n\n if print_energy_corrections:\n print_block(\"Energy Corrections\", wfns.format_energy_corrections_table())\n if print_energies:\n if wfns.degenerate_transformation is not None:\n print_block(\"Deperturbed Energies\",\n wfns.format_deperturbed_energies_table()\n )\n print_block(\n \"Degenerate Energies\",\n wfns.format_energies_table()\n )\n else:\n print_block(\"States Energies\",\n wfns.format_energies_table()\n )\n\n if print_intensities:\n ints = wfns.intensities # to make sure they're computed before printing starts\n if print_transition_moments:\n if wfns.degenerate_transformation is not None:\n for a, m in zip([\"X\", \"Y\", \"Z\"], wfns.format_deperturbed_dipole_contribs_tables()):\n print_block(\"{} Deperturbed Dipole Contributions\".format(a), m)\n\n print_block(\"Deperturbed IR Data\",\n wfns.format_deperturbed_intensities_table()\n )\n\n for a, m in zip([\"X\", \"Y\", \"Z\"], wfns.format_dipole_contribs_tables()):\n print_block(\"{} Dipole Contributions\".format(a), m)\n print_block(\"IR Data\", wfns.format_intensities_table())\n\n if operators is not None:\n print_block(\"Operator Data\", wfns.format_operator_table(operators))", "def 
print_table(rows, header=['Operation', 'OPS']):\n if len(rows) == 0:\n return\n col_max = [max([len(str(val[i])) for val in rows]) + 3 for i in range(len(rows[0]))]\n row_format = ''.join([\"{:<\" + str(length) + \"}\" for length in col_max])\n\n if len(header) > 0:\n print(row_format.format(*header))\n print(row_format.format(*['-' * (val - 2) for val in col_max]))\n\n for row in rows:\n print(row_format.format(*row))\n print(row_format.format(*['-' * (val - 3) for val in col_max]))", "def print_table2(df, eval_dir):\n\n out_file = os.path.join(eval_dir, 'table2.txt')\n\n\n with open(out_file, \"w\") as text_file:\n\n for idx, struc_name in enumerate(['LV', 'RV', 'Myo']):\n # new line\n header_string = ' & '\n line_string = '({}) '.format(struc_name)\n\n for p_idx, phase in enumerate(['ED', 'ES']):\n for measure in ['dice', 'assd', 'hd']:\n\n header_string += ' & {} ({}) '.format(phase, measure)\n\n dat = df.loc[(df['phase'] == phase) & (df['struc'] == struc_name)]\n\n if measure == 'dice':\n\n line_string += ' & {:.3f}\\,({:.3f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n else:\n line_string += ' & {:.2f}\\,({:.2f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n\n if p_idx == 0:\n header_string += ' & '\n line_string += ' & '\n\n header_string += ' \\\\\\\\ \\n'\n line_string += ' \\\\\\\\ \\n'\n\n if idx == 0:\n text_file.write(header_string)\n\n text_file.write(line_string)\n\n return 0", "def print_result_r(result, write=True):\n table = ''\n table += '%-6s%6s%6s%6s\\n' % ('#', 'start', 'end', 'score')\n for i in xrange(len(result['end'])):\n table += '%-6s%6s%6s%6s\\n' % (i+1, result['start'][i]+1,\n result['end'][i]+1,\n result['score'][i])\n if write:\n print table\n else:\n return table", "def print_details():\n\n print('\\n'\n 'SCORE: {0}\\n'\n 'COMPLEXITY: {1}\\n'\n .format(pwd_score, pwd_complex))\n\n print('Password as list: {0}\\n'.format(pwd_list))\n print('ns calculations: {0}\\n'.format(ns))\n print('Scores calculations: {0}\\n'.format(scores))\n print('Entropy: {0}\\n'.format(entropy))\n\n # store string lengths for table\n plength = {\n 'counts': 0,\n 'scores': 0,\n 'heading': 0\n }\n # loop value dicts to get lengths for table\n for k, v in ns.items():\n if len(str(v)) > plength['counts']:\n plength['counts'] = len(str(v))\n for k, v in scores.items():\n if len(str(v)) > plength['scores']:\n plength['scores'] = len(str(v))\n for k, v in stext.items():\n if len(v) > plength['heading']:\n plength['heading'] = len(v)\n\n # print table heading\n # t00, t11, t22 calculate indentation\n t00 = int(((plength['heading'] + 2 - 6) / 2)) * ' '\n t11 = int(((plength['counts'] + 1) / 2)) * ' '\n t22 = int(((plength['scores'] + 1) / 2)) * ' '\n print('{0}Metric{0}{1}Count{1}{2}Bonus'.format(t00, t11, t22))\n\n # print table content\n for k, v in stext.items():\n # get description\n t0 = stext[k]\n # indent count\n t1 = (plength['heading'] + plength['counts'] - len(stext[k]) - len(\n str(ns[k])) + 5) * ' '\n # get count\n t2 = ns[k]\n # indent score\n t3 = (plength['scores'] - len(str(scores[k])) + 5) * ' '\n # get score\n t4 = scores[k]\n print('{0}{1}{2}{3}{4}'.format(t0, t1, t2, t3, t4))", "def print_curation_table(self, **kwargs) -> None:\n s = self.get_curation_table(**kwargs)\n if s:\n print(s) # noqa:T201", "def print_table(ledger):\n\n table = PrettyTable() # defines a PrettyTable object\n\n table.field_names = [\n \"hospital\",\n \"patient\",\n \"status\",\n \"nonce\",\n \"prev_hash\",\n \"a\",\n \"b\",\n \"c\",\n \"current_hash\",\n ] # define field 
names for table\n\n for block in ledger:\n table.add_row(\n [\n block[\"hospital\"],\n block[\"patient\"],\n block[\"status\"],\n block[\"nonce\"],\n block[\"prev_hash\"],\n block[\"a\"],\n block[\"b\"],\n block[\"c\"],\n block[\"current_hash\"],\n ]\n ) # add data to table\n\n print(\"\\n\\n\" + color.BOLD + \"Printing Your Ledger:\" + color.END)\n print(table) # print prettytable of patient info", "def print_tabulated_output(array_obj, headers):\n print()\n print(tabulate(array_obj, headers=headers))\n print()", "def table(nb, max):\n\ti = 0\n\twhile i < max:\n\t\tprint(i + 1, \"*\", nb, \"=\", (i + 1) * nb)\n\t\ti += 1", "def result_table(fmt='latex_booktabs'):\n \n names = [\n \"ETF EW.\",\n \"Antonacci ETF\",\n \"Antonacci ETF Inv. Vol.\",\n \"Futures EW.\",\n \"Antonacci Futures\",\n \"Antonacci Futures Inv. Vol.\",\n \"TSMOM Futures Low Vol.\",\n \"TSMOM Futures High Vol.\"\n ]\n\n # Get stats for each strategy\n s1 = calculate.stats_from_parameters(name='Antonacci', price_set='ETF', fee_rate_bps=10, get_top=7, target_vol=40, periods=6, vol_weight=False)\n s2 = calculate.stats_from_parameters(name='Antonacci', price_set='ETF', fee_rate_bps=10, get_top=2, target_vol=40, periods=6, vol_weight=False)\n s3 = calculate.stats_from_parameters(name='Antonacci', price_set='ETF', fee_rate_bps=10, get_top=2, target_vol=40, periods=6, vol_weight=True)\n s4 = calculate.stats_from_parameters(name='Antonacci', price_set='Futures', fee_rate_bps=10, get_top=47, target_vol=40, periods=6, vol_weight=False)\n s5 = calculate.stats_from_parameters(name='Antonacci', price_set='Futures', fee_rate_bps=10, get_top=10, target_vol=40, periods=6, vol_weight=False)\n s6 = calculate.stats_from_parameters(name='Antonacci', price_set='Futures', fee_rate_bps=10, get_top=10, target_vol=40, periods=6, vol_weight=True)\n s7 = calculate.stats_from_parameters(name='TSMOM', price_set='Futures', fee_rate_bps=10, get_top=10, target_vol=40, periods=6, vol_weight=False)\n s8 = calculate.stats_from_parameters(name='TSMOM', price_set='Futures', fee_rate_bps=10, get_top=10, target_vol=100, periods=6, vol_weight=False)\n\n # The relevant columns from the summary data\n cols = [3, 4, 5, 6]\n num_assets = [7, 2, 2, 47, 10, 10, 47, 47]\n stats = [s1, s2, s3, s4, s5, s6, s7, s8]\n table = [names]\n \n # Collecting the results\n for i, col in enumerate(cols):\n col_list = [round(stat['summary'][col], 2) for stat in stats]\n table.append(col_list)\n\n table.append(num_assets)\n table = list(map(list, zip(*table))) # Transpose\n \n # Creating table headers\n headers = ['Strategy Name', 'Annual Return', 'Annual Vol.', 'Sharpe', 'Max. 
Drawdown', '# Assets']\n \n # Returning latex table\n tbl = tabulate(table, headers, tablefmt=fmt)\n print(tbl)\n \n return tbl", "def print_resamp_table(mean1, median1, mode1, mean2, median2, mode2):\n t = [['Streamwise\\nresolution', 'Before '\n +'After\\nresampling --> resampling', '\\nUnit'],\n ['Mean', str(mean1) + ' --> ' + str(mean2), 'm'],\n ['Median', str(median1) + ' --> ' + str(median2), 'm'],\n ['Mode', str(mode1) + ' --> ' + str(mode2), 'm']]\n print(tabulate(t, tablefmt='psql', stralign='center', headers='firstrow'))", "def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list", "def print_seq(self):\n names, values = [], []\n for each in self.minions:\n names.append(each.name)\n values.append(f'{each.atk}/{each.dfs}')\n t = PrettyTable()\n t.add_row(names)\n t.add_row(values)\n print(t)", "def ascii_table(self, tablefmt=\"pipe\"):\n methods = self.methods\n xvalues = self.xvalues\n plot_matrix = self.plot_matrix\n\n import tabulate\n # https://pypi.python.org/pypi/tabulate\n aug_table = np.hstack((np.array(methods)[:, np.newaxis], plot_matrix))\n return tabulate.tabulate(aug_table, xvalues, tablefmt=tablefmt)", "def print_results(self, question, query, desc=\"views\"):\n # get the resuls from the query\n results = self.db_results(query)\n # print out the question\n print(question)\n # go through the rows and format the results\n for row in results:\n print('\\t' + str(row[0]) + ' - ' + str(row[1]) + ' ' + desc)\n print", "def printTable(songs, language):\n\n attributes = \"\"\n if language == ENGLISH:\n print \"Song Name\".ljust(55) + \" | URL\".ljust(60) + \" | Status\\t\\t\"\n print \"-\" * 56 + \"+\" + \"-\" * 57 + \"+\" + \"-\" * 20\n rows = \"\"\n for i in range(len(songs[\"song\"])):\n rows = rows + songs[\"song\"][i][\"name\"].ljust(55) + \" | \" \\\n + songs[\"song\"][i][\"url\"].ljust(57) + \" | \" \\\n + songs[\"song\"][i][\"status\"] + \"\\n\"\n print rows \n else:\n print \"歌曲名稱\".ljust(55) + \" | URL\".ljust(60) + \" | 狀態\\t\\t\"\n print \"-\" * 52 + \"+\" + \"-\" * 59 + \"+\" + \"-\" * 20\n rows = \"\"\n for i in range(len(songs[\"song\"])):\n rows = rows + songs[\"song\"][i][\"name\"].ljust(51) + \" | \" \\\n + songs[\"song\"][i][\"url\"].ljust(57) + \" | \" \\\n + songs[\"song\"][i][\"status\"] + \"\\n\"\n print rows", "def print_table1(df, eval_dir):\n\n out_file = os.path.join(eval_dir, 'table1.txt')\n\n header_string = ' & '\n line_string = 'METHOD '\n\n\n for s_idx, struc_name in enumerate(['LV', 'RV', 'Myo']):\n for measure in ['dice', 'assd']:\n\n header_string += ' & {} ({}) '.format(measure, struc_name)\n\n dat = df.loc[df['struc'] == struc_name]\n\n if measure == 'dice':\n line_string += ' & {:.3f}\\,({:.3f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n else:\n line_string += ' & {:.2f}\\,({:.2f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n\n if s_idx < 2:\n header_string += ' & '\n line_string += ' & '\n\n header_string += ' \\\\\\\\ \\n'\n line_string += ' \\\\\\\\ \\n'\n\n with open(out_file, \"w\") as text_file:\n text_file.write(header_string)\n text_file.write(line_string)\n\n return 0", "def Table(self, line):\n if line is None:\n # TODO(user): Use resource_printer.TablePrinter() when it lands.\n if self._rows:\n cols = len(self._rows[0])\n width = [0 for _ in range(cols)]\n for row in self._rows:\n for i in range(cols - 1):\n w = len(row[i])\n if width[i] <= w:\n width[i] = w + 1\n for row in self._rows:\n self._out.write(' ' * (self._indent[self._level] + 2))\n 
for i in range(cols - 1):\n self._out.write(row[i].ljust(width[i]))\n self._out.write(row[-1] + '\\n')\n self._rows = []\n self._table = False\n self._out.write('\\n')\n elif not self._table:\n self._table = True\n self.Line()\n else:\n self._rows.append(line.split(','))", "def print_table(table):\n print(\"City \", end='')\n for month in MONTHS:\n print(\"{:>6}\".format(month), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for month in MONTHS:\n print(\"{:>6}\".format(row[month]), end='')\n print(\"\", end='\\n')", "def _print_summary(results):\n if not len(results) > 0:\n print 'No results to show in summary.'\n return\n\n table = {}\n for res in results:\n for k, v in res.iteritems():\n table.setdefault(k, []).append(v)\n print tabulate(table, headers='keys', tablefmt=\"simple\")", "def print_all(self):\n print(\n \"\"\"\\nContents of hash table, with blank lines separating distinct\n linked lists:\"\"\".replace(' ', ''))\n\n for linked_list in self.main_array:\n print(linked_list)\n print('')", "def print_table(n):\n \n numbers = list(range(1, n + 1))\n\n #处理第一行\n s = ''\n for i in numbers:\n s = s + '\\t' + str(i)\n print(s)\n\n for i in numbers:\n s = str(i)\n for j in numbers:\n s = s + '\\t' + str(i * j)\n print(s)", "def out(lam, eng, mat): # {{{1\n print(\"\\\\begin{table}[!htbp]\")\n print(\" \\\\renewcommand{\\\\arraystretch}{1.2}\")\n txt = \" \\\\caption{{\\\\label{{tab:{0}}}properties of {0}}}\"\n # Raw underscores in LaTeX text mode produce “Missing $” errors.\n texlname = lam.name.replace('_', '\\_')\n print(txt.format(texlname))\n print(\" \\\\centering\\\\footnotesize{\\\\rule{0pt}{10pt}\")\n print(\" \\\\tiny calculated by lamprop {}\\\\\\\\[3pt]}}\".format(__version__))\n if eng:\n _engprop(lam)\n if mat:\n _matrices(lam)\n print(\"\\\\end{table}\\n\") # 1}}}", "def print_individuals(self):\n pt = PrettyTable()\n pt.field_names = ['ID', 'Name', 'Gender', 'Birthday', 'Age', 'Alive', 'Death', 'Child', 'Spouse']\n for i in self.individuals.values():\n pt.add_row(i.get_values())\n print(pt)", "def printing_p_matrix(new_all_results):\n\tprint(\"________________________________PROBABILITY MATRIX__________________________________ \")\n\tfor i in range(len(new_all_results)):\n\t\tprint(\"Row Number: \", i+1)\n\t\tprint(\"Vector: \", all_states_explored[i])\n\t\tprint(\"Number of columns: \", len(new_all_results[i]))\n\t\tprint(\"Result: \", new_all_results[i])\n\t\tprint(\"-------------------------------------------------------------------------------------\")\n\tprint(\"____________________________________________________________________________________\")", "def start_table(self):\n self.result = \"<table>\\n\"", "def display_table(dict_list=None, user_config_data=None):\r\n if user_config_data is not None:\r\n # print(tabulate.tabulate(user_config_data, headers=['Variable', 'Value'], tablefmt=\"grid\"))\r\n print(tabulate.tabulate(user_config_data, tablefmt=\"grid\"))\r\n return\r\n\r\n header = [\"idx\"] + list(dict_list[0].keys())\r\n rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)]\r\n print(tabulate.tabulate(rows, header, tablefmt=\"grid\"))", "def display_table(dict_list=None, user_config_data=None):\r\n if user_config_data is not None:\r\n # print(tabulate.tabulate(user_config_data, headers=['Variable', 'Value'], tablefmt=\"grid\"))\r\n print(tabulate.tabulate(user_config_data, tablefmt=\"grid\"))\r\n return\r\n\r\n header 
= [\"idx\"] + list(dict_list[0].keys())\r\n rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)]\r\n print(tabulate.tabulate(rows, header, tablefmt=\"grid\"))", "def get_qtable_str(self):\n output = \"[\\n\"\n for row in self.qtable:\n output += \"\\t\" + str([round(x,2) for x in row]) + \",\\n\"\n output += \"]\\n\"\n\n return output", "def pprint_table(out, table):\n\n\tcol_paddings = []\n\n\tfor i in range(len(table[0])):\n\t\tcol_paddings.append(get_max_width(table, i))\n\n\tfor row in table:\n\t\t# left col\n\t\tout.write(str(row[0]).ljust(col_paddings[0] + 1))\n\t\t\n\t\t# rest of the cols\n\t\tfor i in range(1, len(row)):\n\t\t\tout.write(str(row[i]).rjust(col_paddings[i] + 2))\n\t\t\n\t\tout.write('\\n')", "def print_table_results(train, devel, test, tablefmt, file=sys.stdout):\n\n # Lazy load tabulate\n global tabulate\n if tabulate is None:\n try:\n from tabulate import tabulate\n except ImportError:\n print('Printing latex results requires the `tabulate` package. Tabulate can be installed by running: \\n'\n '$pip install tabulate')\n sys.exit(1)\n\n def _evaluate(dataset: dict, name: str, metrics=None):\n \"\"\"\n Fetch the given metrics from the given dataset metric dictionary in the order they were given\n :param dataset: dictionary containing metrics for a specific dataset\n :param metrics: list of metric names to fetch\n :return: list of metric values\n \"\"\"\n if metrics is None:\n metrics = ['Accuracy', 'AUROC', 'AUPRC', 'Precision', 'Recall', 'F1', 'F2']\n measures = [dataset[metric] for metric in metrics]\n measures.insert(0, name)\n return measures\n\n # Create a LaTeX table using tabulate\n table = tabulate([_evaluate(train, 'train'),\n _evaluate(devel, 'devel'),\n _evaluate(test, 'test')],\n headers=['Data', 'Acc.', 'AUROC', 'AUPRC', 'P', 'R', 'F1', 'F2'],\n tablefmt=tablefmt)\n print(table, file=file)", "def print_results(results):\n print()\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print(\"%% RESULTS %%\")\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print()\n print(\"Route \\t Cells \\t\")\n print(\"Length\\tChecked\\t Time\")\n print(\"--------------------------------\")\n print(\"{0}\\t{1}\\t{2}\".format(*results))\n print()", "def show_html_tables(html_tables):\n\n for (it,t) in enumerate(html_tables):\n print(f\"Table {it}\")\n for (ir,r) in enumerate(t):\n print(f\" Row {ir}\")\n for (ic,c) in enumerate(r):\n print(f\" Col {ic}: {c}\")", "def print_database(self):\n table_names = self.catalog\n for table_name in table_names:\n table = self.parse_table(table_name)\n if not table:\n continue\n print(f'TABLE NAME: {table_name}\\r\\n')\n print(tabulate(table, headers=\"keys\"))\n print('\\r\\n\\r\\n\\r\\n\\r\\n')", "def print_pretty_result(self):\n\t\tfor row in range(0, len(self.resulting_matrix)):\n\t\t\tfor col in range(0, len(self.resulting_matrix[self.__ROW_INDEX])):\n\t\t\t\tactual_number = self.resulting_matrix[row][col]\n\t\t\t\tif (actual_number != 0):\n\t\t\t\t\tprint (\"A la ciudad \", row+1, \" en la fabrica \", col+1, \" se envía \", actual_number)", "def print_table(seqids, data, outputfile, separator='\\t'):\n\n tags = data.keys()\n with open(outputfile, 'w') as out:\n out.write(separator.join([\"#Sequence ID\"] + list(tags)) + \"\\n\")\n for s in seqids:\n out.write(s)\n for t in tags:\n out.write(\"{}{}\".format(separator, data[t].get(s, \"\")))\n out.write(\"\\n\")", "def showState(self):\n for i in self.state[0]:\n for j in self.state[1]:\n print(self.table[i][j], end=\"\")\n print(\"\")", "def 
output_table(results, output, keys=None, sort_key=None):\n\n if output not in constants.TABLE_OUTPUT_FORMAT:\n raise ValueError(\"Output format must be{}, \"\n \"got {}\".format(constants.TABLE_OUTPUT_FORMAT,\n output))\n if output == 'print':\n if len(results) == 0:\n print 'No output!'\n return\n\n headers = [keys[k] for k in keys.keys()] if keys else results[0].keys()\n table = PrettyTable(headers)\n for line in results:\n table.add_row([line[k] if k in line else '' for k in (keys.keys() if keys else headers)])\n\n if sort_key:\n table.sortby = keys[sort_key] if keys else sort_key\n\n print table\n\n if output == 'csv':\n csvwriter = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)\n keys = results[0].keys()\n csvwriter.writerow(keys)\n for row in results:\n csvwriter.writerow([row[k] for k in keys])\n\n if output == 'json':\n print json.dumps(results)", "def prompt_table(prompt, table):\n while True:\n print(prompt)\n for i in range(0, len(table)):\n row_format = \"{:>15}\" * (len(table[i]) + 1)\n print(f\"{i})\\t\" + row_format.format(\"\", *table[i]))\n response = prompt_base(\"\")\n try:\n response = int(response)\n if 0 <= response < len(table):\n return table[response]\n except:\n pass", "def print_para_table(s):\n if MODE == 1:\n t = [['Parameter', 'Value', 'Unit'],\n ['Number of bends', NBENDS, '/'], \n ['Width', WIDTH, 'm'],\n ['Depth', DEPTH, 'm'],\n ['Length', LAMBDA*(NBENDS+1), 'm'],\n ['Arc wavelength', LAMBDA, 'm'],\n ['Slope', SLOPE, '/'],\n ['Streamwise resolution', DS, 'm'],\n ['Transverse resolution', np.around(INTERVAL, decimals=4), 'm'],\n ['Streamwise # of pts', s.size + 2*int(LAMBDA/2/DS), '/'],\n ['Transverse # of pts', NUM*2+1, '/']]\n elif MODE == 2:\n if FNAME[0].islower():\n f = FNAME[0].upper() + FNAME[1:]\n else:\n f = FNAME\n t = [['Parameter', 'Value', 'Unit'],\n ['River name', f.rsplit('.', 1)[0], '/'],\n ['Width', WIDTH, 'm'],\n ['Depth', DEPTH, 'm'],\n ['Length', np.round(s[-1], decimals=2), 'm'],\n ['Slope', SLOPE, '/'],\n ['Streamwise resolution', np.round(np.mean(np.diff(s)), decimals=2), 'm'],\n ['Transverse resolution', np.round(INTERVAL, decimals=2), 'm'],\n ['Streamwise # of pts', s.size, '/'],\n ['Transverse # of pts', NUM*2+1, '/']]\n print(tabulate(t, tablefmt='psql', stralign='right', headers='firstrow'))", "def print_tables(self):\n\n conn = self.engine.connect()\n self.print_table(self.nodes, conn)\n self.print_table(self.paths, conn)\n self.view_tree(connection=conn)", "def print_table(dfinput,idcolumnIndex, longtable, landscape,selected_columns=None,legend=True):\n\tdf = dfinput if selected_columns is None else dfinput[selected_columns]\n\t\n\tsection = \"longtable\" if longtable else \"tabular\"\n\t\n\tcolsize = len(df.columns)\n\tnrows = df.shape[0]\n\tstart = \"\\\\begin{landscape}\" if landscape else \"\"\n\n\tif not longtable:\n\t\tstart += \"\\\\resizebox{\\\\columnwidth}{!}{\"\n\n\tstart += \"\\\\def\\\\sym#1{\\\\ifmmode^{#1}\\\\else\\\\(^{#1}\\\\)\\\\fi} \\\\begin{\" + section + \"}{l*{\"+str(colsize)+\"}{c}} \"\n\tstart += \"\\\\hline\\\\hline \"\n\t\n\tfor i,col in enumerate(df.columns):\n\t\n\t\tif i>idcolumnIndex:\n\t\t\tstart += \" & \\\\multicolumn{1}{c}{(\"+str(i)+\")}\"\n\t\telse:\n\t\t\tstart += \" & \\\\multicolumn{1}{c}{}\"\n\t\n\tstart += \" \\\\\\\\\"\n\t\n\tfor i,col in enumerate(df.columns):\n\t\tif i>idcolumnIndex:\n\t\t start += \" & \\\\multicolumn{1}{c}{\"+str(col)+\"}\"\n\t\telse:\n\t\t\tstart += \" & \\\\multicolumn{1}{c}{}\"\n\n\tstart += \"\\\\\\\\ \\\\hline\"\n\tdat = df.to_dict()\n\tfor i in 
range(nrows):\n\t\tstart += \"\\\\\\\\\"\n\t\trow = [dat[col][i] for col in df.columns]\n\t\tfor c in row:\n\t\t\tstart += \" & \" + format_data(c)\n\n\n\tend = \"\"\n\tif legend:\n\t end += \"\\\\\\\\ \\\\hline\\\\hline \\\\multicolumn{2}{l}{\\\\footnotesize \\\\textit{p}-values in parentheses}\" \n\t end += \"\\\\\\\\ \\\\multicolumn{2}{l}{\\\\footnotesize \\sym{*} \\\\(p<0.05\\\\), \\\\sym{**} \\\\(p<0.01\\\\), \\\\sym{***} \\\\(p<0.001\\\\)}\"\n\t\n\tend += \"\\\\\\\\ \\\\end{\" + section + \"}\"\n\n\tif not longtable:\n\t\tend += \"}\"\n\n\tif landscape:\n\t end += \"\\\\end{landscape}\" \n\n\treturn start + end", "def print_queue(queue):\n print(tabulate.tabulate(queue,headers=['Time','Priority','Action','Argument','kwargs'],\n floatfmt=(\".12f\")))", "def _print_report(self, result: Dict[str, Dict[str, float]]):\n\n print('\\n\\tprecision recall f1_score num')\n for type_ in self.types:\n print(type_, end='\\t')\n print('{0: .3f}'.format(result[type_]['precision']), end=' ')\n print('{0: .3f}'.format(result[type_]['recall']), end=' ')\n print('{0: .3f}'.format(result[type_]['f1_score']), end=' ')\n print('{0: d}'.format(result[type_]['num']), end='\\n')", "def __str__(self):\n table_string = ''\n values = [x * y for x in range(1, self.x + 1)\n for y in range(1, self.y + 1)\n ]\n for value in range(1, len(values) + 1):\n if value % self.x == 0:\n table_string += f'{values[value - 1]}\\n'\n else:\n table_string += f'{values[value - 1]} | '\n return table_string", "def print_result(result, label = \"\"):\n\n if isinstance(label, str) and label:\n print(f\"\\n\\n\\t{label}:\")\n\n if not result:\n print(\"\\tNO RESULTS\")\n return\n\n #if result is string\n if isinstance(result, str):\n print(\"\\n\\t\" + result)\n return\n #if result is integer\n elif isinstance(result, int):\n print(\"\\n\\t\" + result)\n return\n\n text = \"\"\n max_length = [] # max length of item for each column\n titles = []\n\n # if its a list of lists\n if isinstance(result, list): \n # if label we received doesnt give as titles for columns\n if not isinstance(label, list):\n for i in range(len(result[0])):\n titles.append(\"\")\n elif len(label) >= len(result[0]):\n for i in range(len(result[0])):\n titles.append(label[i])\n elif len(label) < len(result[0]):\n print_error_message(\"\\n\\n\\tERROR: NUMBER OF TITLES MUST MATCH NUMBER OF COLUMNS\")\n return\n \n if isinstance(result, dict):\n alist = []\n for key in result:\n alist.append([key,result.get(key)])\n print_table(alist, label)\n return\n\n print_table(result, titles)", "def tabulate(self):\n for test_name, test in self.test_types.items():\n for ivs_name, ivs in self.ivs.items():\n if self.verbose:\n print(\"{0}: {1}\".format(ivs_name, test_name))\n tree = test(ivs)\n if not tree:\n continue\n score = tree.score(True)\n if self.verbose > 1:\n tree.print_structure()\n\n self.result_matrix['ivs name'][ivs_name][test_name] = score\n self.result_matrix['test type'][test_name][ivs_name] = score", "def display_table(a, m):\n # Initialize string\n result = ''\n result += '{'\n\n # Add all polynomials to the string, given they are already a string\n for i in a:\n for j in i[:-1]:\n result += display_poly(j, m)\n result += ', '\n\n # Add the last one here to prevent unneeded comma\n result += display_poly(i[-1], m)\n result += '; '\n\n # Remove final semicolon and close the brace\n result = result[:-2]\n result += '}'\n\n return result", "def __debug_print_questions__(self):\n for k in sorted(self.questions.keys()):\n print(\"Question: %s\" %k)\n for a in 
self.questions[k].answers:\n print(\"\\t%s\" % a)", "def displayGame(self):\n # row1 & row2 longer, row3 & row4 shorter, proper indented below\n print 'current table:'\n for key in ['row1','row2']:\n rowLs = self.table[key]\n string = ''\n for ele in rowLs:\n tmpStr = str(ele) + '\\t'\n string += tmpStr\n print string\n for key in ['row3','row4']:\n string = '\\t'\n rowLs = self.table[key]\n for ele in rowLs:\n tmpStr = str(ele) + '\\t'\n string += tmpStr\n print string \n print 'discardList:'\n print self.discardLs[0],'\\t',self.discardLs[1],'\\n',self.discardLs[2],'\\t',self.discardLs[3]", "def help_description():\n # for ain\n print(\"--------TABLE FOR AIN(AIN4=GND)-------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | AINP | AINN |\")\n for i in range(8):\n print(\"| {} | {} | AIN{} | AIN{} |\".format(str(i), bin(i)[2:].zfill(3), DICT_AIN[i][0],\n DICT_AIN[i][1]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR FSR------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | FSR |\")\n for i in range(6):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_FSR[i]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR RATE------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | RATE |\")\n for i in range(8):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_RATE[i].rjust(7, ' ')))\n print(\"--------------------------------------\")", "def pretty(self):\n #table = [\"\".join([\"%8s \" % s for s in self.alpha.getSymbols()])]\n table = []\n for row in PWM.getFreq(self):\n table.append(\"\".join([\"%8.6f \" % y for y in row]))\n return table", "def print_table(table, title_list):\n table.insert(0, title_list)\n for row_index, row in enumerate(table):\n for col_index, col in enumerate(row):\n if (type(col) == float) or (type(col) == int):\n table[row_index][col_index] = str(\"{0:,.2f}\".format(col))\n widths = [max(map(len, col)) for col in zip(*table)]\n sum_of_widths = sum(widths) + len(table[0]) * 3 - 1\n for row in table:\n print(\"-\" * sum_of_widths)\n print(\"|\" + \" \".join((val.ljust(width) + \"|\" for val, width in zip(row, widths))))\n print(\"-\" * sum_of_widths)", "def print_table(table, title_list):\n table.insert(0, title_list)\n for row_index, row in enumerate(table):\n for col_index, col in enumerate(row):\n if (type(col) == float) or (type(col) == int):\n table[row_index][col_index] = str(\"{0:,.2f}\".format(col))\n widths = [max(map(len, col)) for col in zip(*table)]\n sum_of_widths = sum(widths) + len(table[0]) * 3 - 1\n for row in table:\n print(\"-\" * sum_of_widths)\n print(\"|\" + \" \".join((val.ljust(width) + \"|\" for val, width in zip(row, widths))))\n print(\"-\" * sum_of_widths)", "def print_data(self):\n total_score = 0.0\n\n title_game = 'Game'\n title_word = 'Word'\n title_word_status = 'Word Status'\n title_bad_guesses = 'Bad Guesses'\n title_missed_letters = 'Missed Letters'\n title_total_score = 'Total score'\n\n if not record_word:\n print(\"No words played.\")\n else:\n print('%-5s %-10s %-12s %-5s %-5s %s' %(title_game,title_word, title_word_status, title_bad_guesses, title_missed_letters,title_total_score))\n print('---- ---- ------------ ----------- -------------- -----------')\n for x in range(len(record_word)):\n print('%-5s %-10s %-13s %-11s %-13s 
%.2f'%(record_game[x],record_word[x],record_word_status[x],record_bad_guesses[x],record_missed_letters[x],record_total_score[x]))\n\n for x in range(len(record_total_score)):\n total_score = total_score + record_total_score[x]\n\n print('\\nFinal Score: %.2f' %total_score)", "def displayHTMLtable(acc_sent2, acc_wv03, acc, prec_sent2, prec_wv03, prec, recall_sent2, recall_wv03, recall):\n\n methods = ['Sent2 NBR', 'WV03 NBR', 'WV03 RF']\n accuracies = [\"{:.2%}\".format(acc_sent2), \"{:.2%}\".format(acc_wv03), \"{:.2%}\".format(acc)]\n precisions = [\"{:.2%}\".format(prec_sent2), \"{:.2%}\".format(prec_wv03), \"{:.2%}\".format(prec)]\n recalls = [\"{:.2%}\".format(recall_sent2), \"{:.2%}\".format(recall_wv03), \"{:.2%}\".format(recall)]\n\n data = methods + accuracies + precisions + recalls\n\n data = np.reshape(data, (4, 3)).T\n\n display(HTML(\n '<table style=\"width:100%;\"><th>Method</th><th>Accuracy</th><th>Precision</th><th>Recall</th><tr>{}</tr></table>'.format(\n '</tr><tr>'.join(\n '<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in data)\n )\n ))", "def print_boolean_matrix(true, pred):\n classes = list(true)\n classes.extend(pred)\n classes = list(set(classes))\n matrix_true = dict()\n matrix_false = dict()\n for c in classes:\n matrix_true[c] = 0\n matrix_false[c] = 0\n\n precision, recall, _, _ = score(true, pred, labels=classes)\n\n for i in range(len(true)):\n label_true = true[i]\n label_pred = pred[i]\n if label_true == label_pred:\n matrix_true[label_true] += 1\n else:\n matrix_false[label_true] += 1\n\n print('\\\\begin{table}[h]')\n print('\\\\centering')\n print('\\\\caption{Boolean Matrix}')\n print('\\\\label{boolean_matrix}')\n print('\\\\begin{tabular}{|r|r|r|r|r|}')\n print(' \\\\hline')\n print \"Label & Predicted Correctly & Predicted Incorrectly & Precision & Recall \\\\\\\\ \\\\hline\"\n for i in range(len(classes)):\n print \"{} & {} & {} & {:0.2f} & {:0.2f} \\\\\\\\ \\\\hline\".format(classes[i], matrix_true.get(classes[i], 0), matrix_false.get(classes[i], 0), precision[i], recall[i])\n print \"\\\\multicolumn{{3}}{{|l|}}{{Weighted Average}} & {:0.2f} & {:0.2f} \\\\\\\\ \\hline\".format(precision_score(true, pred, average='weighted'), recall_score(true, pred, average='weighted'))\n print('\\\\end{tabular}')\n print('\\\\end{table}')", "def print_tables(self, amino_df, zinc_df, food_df):\n\n # Output amino acid nutrients dataframe to a csv file\n amino_df.to_csv('amino_acid_food.csv', sep=',')\n print \"A table of amino acids found in different food is saved as amino_acid_food.csv \"\n # Output zinc content of food groups dataframe to a csv file\n zinc_df.to_csv('zinc_FoodGroup.csv', sep=',')\n print \"The zinc value in food belongs to different food groups is saved as zinc_FoodGroup.csv \"\n # Output food group dataframe to a csv file\n food_df.to_csv('FoodGroup.csv', sep=',')\n print \"A table of food names categorized to different food groups is saved as FoodGroup.csv \"" ]
[ "0.75249547", "0.731208", "0.72350246", "0.72027415", "0.7168257", "0.7144765", "0.71109647", "0.7010002", "0.68452394", "0.6828683", "0.6754718", "0.6754718", "0.6742807", "0.6638073", "0.66361946", "0.66236633", "0.66209453", "0.6590905", "0.656231", "0.65480316", "0.6540686", "0.65321463", "0.6524011", "0.65214294", "0.6516316", "0.650876", "0.6504861", "0.64869356", "0.64413315", "0.6400874", "0.63783854", "0.63783854", "0.6371774", "0.63678646", "0.63648367", "0.6361265", "0.6347148", "0.63444835", "0.6344017", "0.6305168", "0.6291523", "0.6284209", "0.6277796", "0.6246226", "0.62385935", "0.6232456", "0.6225737", "0.6221752", "0.621855", "0.62011486", "0.61982006", "0.6195275", "0.619188", "0.61898625", "0.6174923", "0.6172295", "0.6171609", "0.6171035", "0.61685914", "0.61489886", "0.6147538", "0.61444473", "0.6136877", "0.6131097", "0.6120473", "0.6114219", "0.61107725", "0.6103308", "0.60831934", "0.6082216", "0.6081338", "0.60729057", "0.60705", "0.6068529", "0.60540587", "0.6004459", "0.60038257", "0.6003572", "0.6003169", "0.59987545", "0.59899634", "0.59856266", "0.59829795", "0.5976326", "0.59740615", "0.59710544", "0.59700507", "0.5962611", "0.5952351", "0.5950701", "0.59500736", "0.5947487", "0.5937451", "0.5933673", "0.5930205", "0.5930205", "0.5918782", "0.5917565", "0.5892771", "0.58801943" ]
0.6439023
29
Opens a tile at the respective coordinates on the table_state list.
def open_tile(self, y, x):
    # Find the letter index and convert into a y-coordinate.
    # Checks if it is a mine
    if [y, x] in self.mine_locations:
        # explode
        self.show_answer_board([y, x])
        print "Boomz."
        return Minesweeper.IS_A_BOMB
    else:
        # strip(?)tease to the user (oh damn sexy numbers)
        self.tease_user(y, x)
        return Minesweeper.NOT_A_BOMB
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_tile(self):\n # replace with your code\n pass", "def new_tile(self):\n rowm, colm = self.get_ava_index()\n value = 2 if random() <= 0.90 else 4\n self.set_tile(rowm, colm, value)\n print rowm,colm,value", "def new_tile(self):\r\n # replace with your code\r\n empty_square_lists = []\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if(self.get_tile(row, col) == 0):\r\n empty_square_lists.append((row, col))\r\n \r\n if len(empty_square_lists) == 0:\r\n return \"game over!\"\r\n \r\n random_cell = random.choice(empty_square_lists)\r\n random_cell_row = random_cell[0]\r\n random_cell_col = random_cell[1]\r\n \r\n values = [2] * 90 + [4] * 10\r\n value = random.choice(values)\r\n \r\n self.set_tile(random_cell_row, random_cell_col, value)", "def tile(self, x: int, y: int):\n return self.awmap.tile(x, y)", "def __init__(self):\n self.board = {} # dict of (x,y) to PlacedTile\n self.board[(0,0)] = STARTING_PIECE", "def new_tile(self):\n # replace with your code\n empty_list = []\n counter_1 = 0\n for _ in self._grid:\n counter_2 = 0\n line = _\n for blank in line:\n if blank == 0:\n blank_tile = (counter_1, counter_2)\n empty_list.append(blank_tile)\n counter_2 += 1\n else:\n counter_2 += 1\n counter_1 += 1\n #print empty_list\n \n self._tile = empty_list[random.randrange(len(empty_list))]\n \n value = [2,2,2,2,2,2,2,2,2,4]\n tile_value = value[random.randint(0,9)]\n \n self.set_tile(self._tile[0], self._tile[1], tile_value)", "def tile(self):\n raise RuntimeError('Not implemented')", "def new_tile(self):\n \n empty_items = []\n for row in range(self.get_grid_height()):\n for col in range(self.get_grid_width()):\n if self.get_tile(row, col) == 0:\n empty_items.append((row, col))\n \n random_row = 0\n random_col = 0\n if len(empty_items) != 0:\n random_empty_tile = random.randrange(0, len(empty_items))\n (random_row, random_col) = empty_items[random_empty_tile]\n else:\n return\n # the % of getting \"4\" from 0~9 is 10%\n random_time = random.randrange(0, 10)\n \n if random_time == 4:\n self._cells[random_row][random_col] = 4\n else:\n self._cells[random_row][random_col] = 2", "def set_cell(self, x, y, tile_index):\n data_index = x + y * self._size[0] # type: int\n # self._data[data_index] = tile_index\n #\n # if self._sprites[data_index]:\n # self._sprites[data_index].delete()\n # self._sprites[data_index] = None\n\n # Release resources\n if self._tiles[data_index]:\n self._tiles[data_index].delete()\n self._tiles[data_index] = None\n\n # Only create sprite when not zero\n if tile_index:\n tile_prototype = self._tile_set.get(tile_index, None) # type: Optional[Tile]\n if not tile_prototype:\n raise TileSetError(\"tile set does not contain tile for index %s\" % tile_index)\n\n tile_w, tile_h = self._tile_size_2d\n i, j, _k = cart_to_iso(x, y, 0)\n ax, ay = tile_prototype.anchor\n tile_x, tile_y = i * tile_w - ax, j * tile_h - ay\n\n tile = deepcopy(tile_prototype)\n tile.sprite = pyglet.sprite.Sprite(tile.image, tile_x, tile_y)\n tile.aabb3d.pos = float(x), float(y), 0.0\n tile.aabb2d.pos = tile_x, tile_y\n self._tiles[data_index] = tile\n # self._sprites[data_index] = pyglet.sprite.Sprite(tile.image, tile_x, tile_y)\n\n # Currently only supports a single level, so everything is on z-level 0\n # self._aabb3d[data_index] = AABB3D(float(x), float(y), 0.0, tile.size[0], tile.size[1], tile.size[2])\n # self._aabb2d[data_index] = AABB2D(tile_x, tile_y, tile_w, tile_h)", "def __init__tiles__(self):\n return [[Tiles(i, j, Tiles.closed) for j in 
range(self.cols)] for i in range(self.rows)]", "def open_tile(self, i, j):\n # Checks for invalid moves.\n if self.game_lost or not self.valid_tile(i, j):\n return []\n if self.tiles[i][j].category != Tiles.closed:\n return []\n if self.game_won:\n return []\n\n # Redistributes mine field and numbers tiles for the first move of the game.\n if self.opened == 0:\n self.adjust_minefield(i, j)\n self.enumerate_tiles()\n \n # Counts the number of tiles opened for checking game winning moves.\n self.opened += 1\n\n # Sets the current closed tile equal to the opened board tile.\n self.tiles[i][j] = self.board[i][j]\n\n # Checks for game ending moves.\n if (self.opened + self.mines) == (self.rows * self.cols):\n self.game_won = True\n if self.tiles[i][j].category == Tiles.mine:\n self.game_lost = True\n\n # Opens adjacent tiles as needed.\n elif self.tiles[i][j].category == Tiles.zero:\n return self.open_adjacents(i, j, [self.tiles[i][j]])\n\n return [self.tiles[i][j]]", "def new_tile(self):\n zero_list = []\n zero_cell = ()\n # self._cells = [[0 for col in range(self._grid_width)] for row in range(self._grid_height)]\n for row in range(self._grid_height):\n for col in range(self._grid_width):\n if self._cells[row][col] == 0:\n zero_cell = (row, col)\n zero_list.append(zero_cell)\n if len(zero_list) > 0:\n chance = random.randrange(0,10)\n cell_idx = random.randrange(len(zero_list))\n if chance == 9:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 4\n else:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 2\n else:\n print(\"You lost! Better luck next time!\")", "def changeTile (self, posY, posX, tile=\"t\"):\r\n self.grid[posY][posX] = tile", "def populate_board(self):\n for row in range(10):\n for col in range(10):\n coord = Coordinate(row, col)\n coord_attack = Coordinate(row, col)\n self.player_table.setItem(row, col, coord)\n self.attack_table.setItem(row, col, coord_attack)", "def getGameState(self):\n row1 = [0, 0, 0]\n row2 = [0, 0, 0]\n row3 = [0, 0, 0]\n tilePosStatement = Statement()\n posTerm1 = Term('?x')\n posTerm2 = Term('?y')\n posTerm3 = Term('?tile')\n tilePosStatement.terms = (posTerm1, posTerm2, posTerm3)\n tilePosStatement.predicate = 'tilePos'\n for fact in self.kb.facts:\n if match(fact.statement, tilePosStatement):\n if fact.statement.terms[2] == Term(Constant('tile1')):\n term = 1\n if fact.statement.terms[2] == Term(Constant('tile2')):\n term = 2\n if fact.statement.terms[2] == Term(Constant('tile3')):\n term = 3\n if fact.statement.terms[2] == Term(Constant('tile4')):\n term = 4\n if fact.statement.terms[2] == Term(Constant('tile5')):\n term = 5\n if fact.statement.terms[2] == Term(Constant('tile6')):\n term = 6\n if fact.statement.terms[2] == Term(Constant('tile7')):\n term = 7\n if fact.statement.terms[2] == Term(Constant('tile8')):\n term = 8\n if fact.statement.terms[2] == Term(Constant('empty')):\n term = -1\n if fact.statement.terms[0] == Term(Constant('pos1')):\n col = 0\n elif fact.statement.terms[0] == Term(Constant('pos2')):\n col = 1\n elif fact.statement.terms[0] == Term(Constant('pos3')):\n col = 2\n if fact.statement.terms[1] == Term(Constant('pos1')):\n row1[col] = term\n\n elif fact.statement.terms[1] == Term(Constant('pos2')):\n row2[col] = term\n\n elif fact.statement.terms[1] == Term(Constant('pos3')):\n row3[col] = term\n\n row1 = tuple(row1)\n row2 = tuple(row2)\n row3 = tuple(row3)\n result = (row1, row2, row3)\n return result\n\n ### Student code goes here", "def __init__(self,size,tilelist,buttonflag):\n\n # 
Initialize the screen class\n BaseScreen.__init__(self,size)\n\n # Create the list of tile objects and draw them on the screen\n self.tilelist = tilelist\n xlen = self.tilelist[0][0].image.get_width()\n ylen = self.tilelist[0][0].image.get_height()\n for x in range(0,size[0],xlen):\n for y in range(0,size[1],ylen):\n try:\n self.image.blit(self.tilelist[x // xlen][y // ylen].image,(x,y))\n self.tilelist[x // xlen][y // ylen].set_position((x,y))\n except:\n pass\n\n # Set up an empty button list and the buttonflag\n self.buttonlist = []\n self.buttonflag = buttonflag", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.numTiles = width * height\n\n for w in range(0,width):\n for h in range(0,height):\n #NOTE--float width,height as tuple keys don't work?!\n #so could not use Position(), since those x,y's can be floats\n #tuples of ints (w,h) could be used\n self.tiles[(w,h)] = 0 # value of key tuple (w,h) = 0 = dirty (or vice versa, 1 = clean)\n #self.printTiles()\n #raise NotImplementedError", "def load_frame(self):\n world_map = self.data[self.time_point][\"tiles\"]\n self.tiles = []\n for x in range(self.width):\n for y in range(self.height):\n index = x + self.width * y\n tile = world_map[index]\n xpos = x * tile_size\n ypos = y * tile_size\n if tile[\"type\"] == \"Wall\":\n sprite = pyglet.sprite.Sprite(images[\"Wall\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"SnakeHead\":\n sprite = pyglet.sprite.Sprite(images[\"SnakeHead\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"SnakeBody\":\n sprite = pyglet.sprite.Sprite(images[\"SnakeBody\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"Doodah\":\n sprite = pyglet.sprite.Sprite(images[\"Doodah\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"Blank\":\n sprite = pyglet.sprite.Sprite(images[\"Blank\"], x=xpos, y=ypos)\n self.tiles.append(sprite)", "def new_tile(self):\r\n # creating a list value to ensure the 90 and 10 percent ratio\r\n value=[2,2,2,2,2,2,2,2,2,2]\r\n position_of_4=random.randrange(0,10)\r\n value[position_of_4]=4\r\n # selecting a random position on the grid\r\n dummy_row=random.randrange(0,self._height)\r\n dummy_column=random.randrange(0,self._width)\r\n # check to ensure that same tiles are not selected\r\n if self._grid[dummy_row][dummy_column]!=0:\r\n while self._grid[dummy_row][dummy_column]!=0:\r\n dummy_row=random.randrange(0,self._height)\r\n dummy_column=random.randrange(0,self._width)\r\n # assigning a value to the selected tile\r\n self._grid[dummy_row][dummy_column]=random.choice(value)", "def make_floor(self):\n\n for y in range(0, self.num_tiles[1] + 1):\n for x in range(0, self.num_tiles[0] + 1):\n offset = (x * self.tile.size[0], y * self.tile.size[1])\n self.image.blit(self.tile.image, offset)", "def new_tile(self):\n # Getting the list of positions of empty tiles\n indices_list = [(i, j) for i, l in enumerate(self._grid)\n for j in xrange(len(l)) if not l[j]]\n \n # Filling the the empty tile with a 2 or a 4\n if indices_list:\n self.set_tile(*choice(indices_list),\n value = 2 if random() <.9 else 4)", "def open(self, path):\n\n # abre el tilemap en formato JSON\n data = JSON.open(path)\n\n # número de tiles en 'x' y 'y'\n self.width = data['width']\n self.height = data['height']\n\n # ancho y alto de los tiles\n self.tilewidth = data['tilewidth']\n self.tileheight = data['tileheight']\n\n # calcula las dimensiones del tilemap en pixeles\n self.rect.w = self.width * self.tilewidth\n self.rect.h = self.height * self.tileheight\n\n # extrae los tilesets\n tilesets = 
self.tilesets\n for tileset_node in data['tilesets']:\n tileset = TiledTileset(tileset_node, path)\n tilesets.append(tileset)\n self.split_tileset(tileset)\n\n # extrae las capas (layers)\n layers = self.layers\n for layer_node in data['layers']:\n layer = TiledLayer(layer_node)\n layers.append(layer)\n self.arrange_tiles(layer)", "def new_tile(self):\r\n count = 0\r\n tot_count = self.get_grid_width() * self.get_grid_height()\r\n\r\n while count < 2 and tot_count > 0:\r\n # my_list = 4 10% of the time and a 2 90%\r\n my_list = [4] * 10 + [2] * 90\r\n new_tile = random.choice(my_list)\r\n\r\n # Selects a random number from 0 to width * height -1\r\n\r\n spot = random.randint(0, self._grid_height * self._grid_width - 1)\r\n\r\n # sets location to random selection from spot\r\n loc = [spot / self._grid_width, spot % self._grid_width]\r\n # if loc is empty ( == 0 ) sets number, else repeats process.\r\n\r\n if self._board[loc[0]][loc[1]] == 0:\r\n # sets radom selected board tile to new_tile number\r\n self._board[loc[0]][loc[1]] = new_tile\r\n count += 1\r\n tot_count -= 1", "def new_tile(self):\n \n # get random corordinates for new tile\n row = random.randint(0,self._grid_width)\n col = random.randint(0,self._grid_height)\n # keeps generating random tile corordinates for non-empty tile\n while self.get_tile(row,col) != 0:\n row = random.randint(0,self._grid_width)\n col = random.randint(0,self._grid_height)\n \n # get random index of new tile value\n freq = random.randint(0,9)\n if freq == 9:\n self.set_tile(row, col, 4)\n else:\n self.set_tile(row, col, 2)", "def setTile(self, cell, tile):\n assert isinstance(cell, tuple)\n cellx, celly = cell\n\n if cellx < 0 or cellx > self.map_array.shape[0]-1 or celly < 0 or celly > self.map_array.shape[1]-1:\n return\n\n if self.tile_dict.get((cellx, celly)):\n self.canvas.delete(self.tile_dict[(cellx, celly)])\n\n if tile:\n self.map_array[cellx, celly] = tile.tid\n if tile.tid == 0.0:\n return\n map_posx, map_posy = iso(cellx * self.cell_width, celly * self.cell_height)\n image = self.main.main_tilelist.images[tile.tid]\n self.tile_dict[(cellx, celly)] = self.canvas.create_image(map_posx, map_posy, image=image, anchor=tk.N)", "def set_tile(self, row, col, value):\n # replace with your code\n pass", "def __init__(self, tile, color=TileColor.START_COLOR.value):\n self.tile = tile\n self.list_of_coordinates = tile.coordinates\n self.color = color", "def new_tile(self):\r\n random_row = random.randrange(0, self._grid_height)\r\n random_col = random.randrange(0, self._grid_width)\r\n random_choice = random.choice([2]*90 + [4] * 10)\r\n \r\n if 0 in [num for elem in self._cells for num in elem]: \r\n if self._cells[random_row][random_col] == 0:\r\n self._cells[random_row][random_col] = random_choice \r\n else:\r\n self.new_tile()\r\n else:\r\n pass", "def __draw_tiles(self, state):\n tile_to_display_char = {\n Tile.EMPTY: ' ',\n Tile.ORB: 'o',\n Tile.TAIL: curses.ACS_BLOCK,\n }\n\n for y in range(0, self.config.arena_size[1]):\n for x in range(0, self.config.arena_size[0]):\n tile = state.arena[x][y]\n display_char = tile_to_display_char[tile]\n try:\n self.arena_win.addch(y + 1, x + 1, display_char)\n except (curses.error):\n # addch() fails at the bottom-right character because it tries\n # to scroll to a new line but no line exists. 
Best workaround\n # I could find.\n # https://stackoverflow.com/questions/37648557/curses-error-add-wch-returned-an-error\n pass", "def __handle_view_tile(self, gamestate_component):", "def __init__(self, tiles):\n self.tiles = tiles", "def __init__(self, width, height):\n roomDict = {}\n for w in range(width):\n for h in range(height):\n roomDict[Position(w, h)] = 'dirty'\n self.tiles = roomDict\n self.width = width\n self.height = height", "def query_image_tile(self, coord):", "def new_tile(self):\n random.shuffle(self.tiles) # shuffle the list of tiles tuples\n count = 0\n while self.get_tile(self.tiles[0][0], self.tiles[0][1]) != 0 and count < self.grid_height*self.grid_width: \n self.tiles.append(self.tiles.pop(0)) \n \n # next, select value as 2 with a 90% probability (percentage) and 4 with 10%\n percentage = random.random() \n if percentage > 0.1:\n value = 2\n else:\n value = 4\n row = self.tiles[0][0]\n col = self.tiles[0][1]\n self.set_tile(row , col,value)", "def new_tile(self):\r\n rand_x = random.randrange(self.width)\r\n rand_y = random.randrange(self.height)\r\n while self.get_tile(rand_y, rand_x) != 0:\r\n rand_x = random.randrange(self.width)\r\n rand_y = random.randrange(self.height)\r\n value = random.choice([2,2,2,2,2,2,2,2,2,4])\r\n del self.board[rand_y][rand_x]\r\n self.board[rand_y].insert(rand_x,value)\r\n return self.board", "def open_tile(filename):\n geoimg = gippy.GeoImage(filename, True)\n z, x, y = map(int, geoimg.basename().split('-')[0:4])\n tile = Tile.from_google(google_x=x, google_y=y, zoom=z)\n geoimg.set_srs('EPSG:3857')\n minpt = tile.bounds[0].meters\n maxpt = tile.bounds[1].meters\n affine = np.array(\n [\n minpt[0], (maxpt[0]-minpt[0])/geoimg.xsize(), 0.0,\n maxpt[1], 0.0, -(maxpt[1]-minpt[1])/geoimg.ysize()\n ])\n geoimg.set_affine(affine)\n geoimg.set_nodata(-1)\n return geoimg", "def new_tile(self):\n while True:\n random_row = random.randrange(self._grid_height)\n random_column = random.randrange(self._grid_width)\n if self._grid[random_row][random_column] == 0:\n self._grid[random_row][random_column] = random.choice([2] * 9 + [4])\n break", "def loadTiles():\n with open('resources/map.txt', 'r') as f:\n rows = f.readlines()\n global numCols\n numCols = len(rows[0].split('\\t')) # Assumes all rows contain the same number of tabs\n global numRows\n numRows = len(rows)\n for y in range(numRows):\n cols = rows[y].split('\\t')\n for x in range(numCols):\n tileName = cols[x].replace('\\n', '')\n if tileName == \"StartingRoom\":\n global currentPosition\n currentPosition = [x, y]\n _world[(x, y)] = None if tileName == '' else getattr(__import__('tiles'), tileName) (x, y)", "def makeState(*args,**kwargs):\n \n cells = []\n\n for item in args:\n #print item\n cells.append(item)\n \n newState = State(cells)\n #newState.printBoard()\n return newState", "def new_tile(self):\n col = random.choice(range(self.grid_width))\n row = random.choice(range(self.grid_height))\n if self.grid[row][col] == 0:\n if random.random() >= 0.9:\n self.grid[row][col] = 4\n else:\n self.grid[row][col] = 2\n else:\n self.new_tile()", "def draw_tile(self, tile):\n raise NotImplemented()", "def put_next_tiles(p,tiles):\n if tiles['mode'].upper() == \"INIT\":\n set_value(p,tiles['0']['lig'],tiles['0']['col'],tiles['0']['val']) # mettre la vleur dans le plateau avec la position donnee de tiles\n set_value(p,tiles['1']['lig'],tiles['1']['col'],tiles['1']['val'])\n else:\n set_value(p,tiles['0']['lig'],tiles['0']['col'],tiles['0']['val'])", "def testBasicTile(self):\n pos = (0,0)\n 
launcher.TextFrame._ResetTiling()\n for i in range(3):\n lc = launcher.TextFrame('big bad window title')\n newpos = lc.GetPositionTuple()\n self.assertTrue(newpos[0] > pos[0])\n self.assertTrue(newpos[1] > pos[1])\n pos = newpos", "def __init__(self, width, height):\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.numTiles = width*height\n\t\tself.tiles = []\n\t\tfor i in range(0, width):\n\t\t\tfor j in range(0, height):\n\t\t\t\tself.tiles.append(Tile(i, j))", "def create_objects(cls, table):\n x = 2\n state = State(table[1][4])\n while x < len(table):\n line = table[x]\n if line[5] == \"powiat\" or line[5] == \"miasto na prawach powiatu\":\n county = County(line[4], line[1])\n state.in_state(county)\n elif line[5] == \"miasto\":\n city = City(line[4], line[1], line[2])\n state.in_state(city)\n elif line[5] == \"gmina miejska\":\n city_community = City_Community(line[4], line[1], line[2])\n state.in_state(city_community)\n elif line[5] == \"gmina wiejska\":\n village_community = Village_Community(line[4], line[1], line[2])\n state.in_state(village_community)\n elif line[5] == \"gmina miejsko-wiejska\":\n city_village_community = City_Village_Community(line[4], line[1], line[2])\n state.in_state(city_village_community)\n elif line[5] == \"obszar wiejski\":\n village_square = Village_square(line[4], line[1], line[2])\n state.in_state(village_square)\n elif line[5] == \"delegatura\":\n delagacy = Delegacy(line[4], line[1], line[2])\n state.in_state(delagacy)\n x+=1\n\n for county in state.in_s:#adding community objects to a proper county\n if type(county) == County:\n for community in state.in_s:\n if community.county_number == county.county_number and type(community) != County:\n county.in_county(community)\n\n return state", "def load_tile(tile):\n return pygame.image.load(tile[\"states\"][\"default\"][0])", "def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value", "def test_live_cell(self, alive_cells, alive):\n for positions in alive_cells:\n world = gol.World(3, 3)\n world.set_cell((0, 0))\n for x, y in positions:\n world.set_cell((x, y))\n world.update()\n assert world[(0, 0)] == alive", "def __init__(self, height, width, mines):\n self.x = int(width)\n self.y = int(height)\n self.table_state = [\n ['-' for i in xrange(0, self.x)] for j in xrange(0, self.y)]\n self.mine_locations = self.generate_mines(int(mines))\n self.final_table = self.generate_answer()", "def new_tile(self):\n two_or_four = random.random();\n if two_or_four < 0.9:\n value = 2\n else:\n value = 4\n empty = False\n all_cells = 0\n while empty == False:\n all_cells += 1 \n row = random.choice(range(self._height))\n col = random.choice(range(self._width))\n if self.get_tile(row, col) == 0:\n empty = True\n self.set_tile(row, col, value)\n elif all_cells >= self._height * self._width:\n empty = True", "def click_cell(self, event):\n if (self.world_setable):\n x, y = event.x, event.y\n row = y / self.cell_size\n col = x / self.cell_size\n if ((row in range(self.cell_row)) and\n (col in range(self.cell_col))):\n status_now = not self.world_status.now[row, col]\n if (status_now):\n color = self.color_alive\n else:\n color = self.color_dead\n item_id = self.world[row, col]\n self.canvas.itemconfig(item_id, fill=color)\n self.world_status.now[row, col] = status_now\n self.world_status.next = self.world_status.now.copy()\n self.init_world = self.world_status.now.copy()", "def 
tile_list(tilefile):\n\t\n\ttf=file(tilefile,\"r\")\n\t\n\ttd=pickle.load(tf)\n\n\ttf.close()\n\treturn td", "def switch(self, tile):\n self.tiles[self.tiles.index(tile)], self.opentile, self.prev = self.opentile, tile, self.opentile\n self.nb_move += 1", "def __init__(self, tiles = []):\n self.tiles = tiles", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._cells[row][col] = value", "def renderTiles(self, window):\n window.blit(self.TileSurface, (self.x, self.y),(self.textures))", "def __init__(self, spriteSheet, rowcols, sizeXY, initOffset = (0, 0)):\n self.imhash = {}\n self.sprites = spriteSheet\n numrows, numcols = rowcols\n self.w, self.h = sizeXY\n i = 0\n for row in range(numrows):\n for col in range(numcols):\n self.imhash[i] = self.sprites.subsurface((col*self.w, row*self.h, self.w, self.h))\n self.imhash[row, col] = self.imhash[i]\n i += 1\n self.actions = {}", "def make(self,state_board):\n\t\tstate_board[self.column][self.line] = self.couleur #place the piece\n\t\tdrawPiece((self.column,self.line),self.couleur) #draws it on the board\n\t\tfor pos in self.flips: #flips all the pieces in flips\n\t\t\tstate_board[pos[0]][pos[1]] = self.couleur\n\t\t\tdrawPiece(pos,self.couleur) #draws it on the board", "def new_tile(self):\r\n # check if is zero or not\r\n new_tile_added = False\r\n # a list to 2 90% of the time and 4 10% of the time\r\n new_tile_list = [2,2,2,2,2,2,2,2,2,4]\r\n counter = 0\r\n while not new_tile_added:\r\n row_position = random.randrange(0,self.grid_height)\r\n col_position = random.randrange(0,self.grid_width)\r\n if self.grid[row_position][col_position] == 0:\r\n self.grid[row_position][col_position] = random.choice(new_tile_list)\r\n new_tile_added = True\r\n if counter > self.grid_width * self.grid_height:\r\n print 'you failed'\r\n break\r\n\r\n counter +=1", "def choose_open_tile(self):\n import random\n found = False\n while not found:\n pos = [random.randint(0, self.width-1),\n random.randint(0, self.height-1)]\n target_tile = self.get_tile(pos)\n if target_tile.character_can_enter():\n found = True\n\n return pos, target_tile", "def create_tile(self, name):\n return self.subgrids[name[0:2]].tilesys.create_tile(name)", "def make_pygame(height, width, goal_co_ord, tile_size):\n surface_size = (height * tile_size[0], width * tile_size[1])\n surface = pygame.display.set_mode(surface_size)\n surface.fill(pygame.Color('black'))\n\n tiles = []\n for h in range(height):\n for w in range(width):\n tiles.append(Tile(surface, h, w, goal_co_ord, tile_size))\n return surface, tiles", "def create():\n\tprint 'create tile map instance'\n\tsurface = tilemap.new(width, height, maxelevation)\n\t#for i in range(5):\n\t\t#tilemap.generator.rain(surface, 2000)\n\tspringlevel=len(surface)/5\n\tspringrange=springlevel/2\n\tprint springlevel\n\tprint 'run water simulation'\n\tfor i in range(1):\n\t\ttilemap.generator.rain(surface, 40, \n\t\t\tsprings=[s for s in surface.highest(\n\t\t\tspringlevel+(springrange)/(i+1))[springlevel::springrange/5]])\n\tprint 'smooth out heightmap irritations'\n\ttilemap.generator.smoothen(surface,1)\n\tprint 'run grass growing simulation'\n\ttilemap.generator.sprout(surface)\n\tprint 'apply tile map node parameters, compute node polygon coordinates'\n\tsurface.init_mesh()\n\tprint 'return tile map instance'\n\treturn surface", "def set_our_tile(self, x, y, value):\n\t\tif x >= 0 and x < self.w and y >= 0 and y < self.h:\n\t\t\tself.our_tiles[x][y] = value", "def populate_tiles(self):\n\n # grid format :\n # 
grid(x,y,z)[0]: A valid WorldTile type (i.e. WorldTile.door)\n # grid(x,y,z)[1]: A list of ASCII color or format codes for ColorIze\n # grid(x,y,z)[2]: The tile object\n\n self.t_count = 0 # Tile count, increment for each tile added\n self.build_start = time.clock()\n self.logger.info(\"[*] Starting world building script\")\n\n script_list = [\n self.build_boss_room,\n self.build_rooms,\n self.build_halls,\n self.build_doors,\n self.build_chests,\n self.build_traps,\n self.build_mobs,\n self.build_npcs\n ]\n for func in script_list:\n self.logger.debug(\"\\tRunning {}\".format(func.__name__))\n if not func():\n e_text = \"Build script failed : {}\".format(func.__name__)\n raise AssertionError(e_text)\n\n self.logger.info(\"[*] World building script completed\")\n self.logger.debug(\"\\tTiles Placed : {}\".format(self.t_count))\n build_time = time.clock()-self.build_start\n self.logger.debug(\"\\tTook {}s\".format(build_time))\n self.logger.debug(\"\\tTiles/s : {}\".format(t_count/build_time))", "def set_tile(self, row, col, value):\n # replace with your code\n self._cells[row][col] = value", "def test_create_tile_puzzle(self):\n p = hw.create_tile_puzzle(3, 3)\n self.assertEqual(p.get_board(), [[1,2,3],[4,5,6],[7,8,0]])\n p = hw.create_tile_puzzle(2, 4)\n self.assertEqual(p.get_board(), [[1,2,3,4],[5,6,7,0]])\n p = hw.create_tile_puzzle(1, 4)\n self.assertEqual(p.get_board(), [[1,2,3,0]])", "def addTiles(self, rows, cols, minecount):\n for row in range(rows):\n self.tiles.append([])\n for col in range(cols):\n tile = Tile(self, row, col)\n tile.grid(row=row+1, column=col)\n self.tiles[row].append(tile)\n #left click listeners\n tile.bind('<ButtonPress-1>', self.pressTile)\n tile.bind('<ButtonRelease-1>', self.showTile)\n #middle click listeners\n tile.bind('<ButtonPress-2>', self.pressAdjTiles)\n tile.bind('<ButtonRelease-2>', self.showAdjTiles)\n #right click listeners\n tile.bind('<ButtonPress-3>', self.pressTile)\n tile.bind('<ButtonRelease-3>', self.toggleFlag)", "def add_tile(self, coordinate, tile):\n self._maze[coordinate] = tile", "def createTiles():\n Renderer.Clear()\n map = []\n w, h = len(testmap[0]), len(testmap)\n x, y = 0, 0\n for row in testmap:\n for char in row:\n map.append(makeTile(char, x, y))\n x += 1\n y += 1\n x = 0\n\n return map, w, h", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._grid_tile[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\r\n self.grid[row][col] = value", "def __init__(self):\n self.grid = {}\n for i in range(21):\n self.grid[i] = [' ']*21\n self._len_x = len(self.grid[0])\n self._len_y = len(self.grid)\n self.forbidden_tiles = []\n self.allowed_tiles = []\n self.exit = None\n self.entrance = None", "def locations_to_state(locations: list) -> list:\n\n state = []\n for i in range(0, 4):\n state.append([])\n # The first layer of the list consists of the four rows of a state\n for j in range(0, 4):\n state[i].append(-1)\n \"\"\" The second layer consists of the four tiles of a row (one of them could be \n the blank).\"\"\"\n\n for i in range(0, 16):\n state[locations[i][0]][locations[i][1]] = i\n \"\"\" locations[i][0] stores the row number, locations[i][0] stores the column \n number, and i is the number on the tile.\"\"\"\n\n return state", "def make_board(self):\n http = urllib3.PoolManager()\n r = 
http.request('GET', 'http://www.cse.msu.edu/~ruppmatt/itm891/tiles.pickle')\n tiles = pickle.loads(r.data)\n self.assets = tiles\n self.gameboard = Image.new('RGBA', (64*(self.world_width+2), 64*(self.world_height+2)))\n # Laydown land\n for c in range(0,self.world_width):\n for r in range(0, self.world_height):\n x = (c+1)*64\n y = (r+1)*64\n tile_ndx = np.random.choice(len(tiles['land']))\n self.gameboard.paste(tiles['land'][tile_ndx], (x,y)) \n # Laydown water\n for c in range(0,self.world_width):\n x = (c+1)*64\n yy = (self.world_height+1)*64\n self.gameboard.paste(tiles['water']['edge_north'], (x,0))\n self.gameboard.paste(tiles['water']['edge_south'], (x, yy))\n for r in range(0,self.world_height):\n y = (r+1)*64\n xx = (self.world_width+1)*64\n self.gameboard.paste(tiles['water']['edge_west'], (0,y))\n self.gameboard.paste(tiles['water']['edge_east'], (xx,y))\n self.gameboard.paste(tiles['water']['corner_nw'], (0,0))\n self.gameboard.paste(tiles['water']['corner_sw'], (0,(self.world_height+1)*64))\n self.gameboard.paste(tiles['water']['corner_ne'], ((self.world_width+1)*64,0))\n self.gameboard.paste(tiles['water']['corner_se'], ((self.world_width+1)*64,(self.world_height+1)*64))\n \n # Some land lines\n draw = ImageDraw.Draw(self.gameboard)\n for c in range(0,self.world_width-1):\n y_1 = 64\n y_2 = 64*(self.world_height+1)\n x = (2+c)*64\n draw.line([(x,y_1),(x,y_2)], fill='white', width=1)\n for r in range(0,self.world_height-1):\n y = (2+r)*64\n x_1= 64\n x_2 = 64 * (self.world_width+1)\n draw.line([(x_1,y),(x_2,y)], fill='white', width=1)\n return", "def __init__(self, width, height):\n self.width =width\n self.height = height\n self.box_width = width/self._BOXES_WIDE\n print 'box width: ', self.box_width\n self.box_height = height/self._BOXES_TALL\n\n self.tiles = []\n self.changes = set()\n y = 0\n for i in range(World._BOXES_TALL):\n y += self.box_height\n x = 0\n self.tiles.append([])\n for j in range(World._BOXES_WIDE):\n x += self.box_width\n tile = Tile(self.changes, x, y, self.box_width, self.box_height)\n self.tiles[i].append(tile)", "def getAction(self, state):\n print(state.map)\n for tile in state.map:\n print(tile.topleft)\n print(tile.width)\n print(tile.height)\n print(tile.type)\n\n # left, top, width, height, type\n\n if util.flipCoin(self.epsilon):\n return self.last_move\n move = random.choice([1,2,3,4])\n self.last_move = move\n return move", "def play_game(grid, instruction_list):\n location_x = instruction_list[1]\n location_y = instruction_list[0]\n tile = instruction_list[2]\n\n if tile == 0:\n grid[location_x][location_y] = ' '\n elif tile == 1:\n grid[location_x][location_y] = 'W'\n elif tile == 2:\n grid[location_x][location_y] = 'B'\n elif tile == 3:\n grid[location_x][location_y] = 'H'\n elif tile == 4:\n grid[location_x][location_y] = 'O'\n else:\n print('And I oop...play_game')\n\n return grid", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value;", "def init_board():\n\t# Generates a table 10*10 of 0s with -1 around and the initial state\n\t# of the board with 2 whites and 2 blacks in the middle\n\ttable = [[0 if i != 0 and i != 9 else -1 for i in range(10)] if j != 0 and j != 9 else [-1 for i in range(10)] for j in range(10)] #leaves a -1 line around the whole table of 0s\n\t#initial state is drawn and recorded\n\ttable[4][4] = 2\n\ttable[5][5] = 2\n\ttable[4][5] = 1\n\ttable[5][4] = 1\n\tdrawPiece((4,4),2)\n\tdrawPiece((5,5),2)\n\tdrawPiece((4,5),1)\n\tdrawPiece((5,4),1)\n\treturn table", "def 
find_tile(self, neighbour_list):\n if neighbour_list[1] == 'tile':\n if neighbour_list[2] == 'trap':\n if neighbour_list[0] == 'north':\n self.lab.disarm('north')\n print 'Disarm north trap'\n elif neighbour_list[0] == 'south':\n self.lab.disarm('south')\n print 'Disarm south trap'\n elif neighbour_list[0] == 'west':\n self.lab.disarm('west')\n print 'Disarm west trap'\n elif neighbour_list[0] == 'east':\n self.lab.disarm('east')\n print 'Disarm east trap'\n return True\n else:\n if neighbour_list[1] == 'toby':\n if neighbour_list[0] == 'north':\n self.moveNorth()\n elif neighbour_list[0] == 'south':\n self.moveSouth()\n elif neighbour_list[0] == 'west':\n self.moveWest()\n elif neighbour_list[0] == 'east':\n self.moveEast()\n return False", "def make_state() -> state.GameState:\r\n dung: world.Dungeon = worldgen.EmptyDungeonGenerator(20, 20).spawn_dungeon(0)\r\n p1x, p1y = dung.get_random_unblocked()\r\n p2x, p2y = dung.get_random_unblocked()\r\n while (p2x, p2y) == (p1x, p1y):\r\n p2x, p2y = dung.get_random_unblocked()\r\n ent1 = entities.Entity(1, 0, p1x, p1y, 10, 10, 2, 1, [], dict())\r\n ent2 = entities.Entity(2, 0, p2x, p2y, 10, 10, 2, 1, [], dict())\r\n return state.GameState(True, 1, 1, 2, world.World({0: dung}), [ent1, ent2])", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\r\n self._grid[row][col]=value", "def icon_maker(self, n, icon_x, icon_y):\n sprite = displayio.TileGrid(self.sprite_sheet, pixel_shader=self.palette, width=1,\n height=1, tile_width=40, tile_height=40, default_tile=n,\n x=icon_x, y=icon_y)\n return sprite", "def fill_with_random_tiles(self):\n for elem in [x[1] for x in self.tile_grid.values()]:\n self.view.remove(elem)\n tile_grid = {}\n # Fill the data matrix with random tile types\n while True: # Loop until we have a valid table (no imploding lines)\n for x in range(COLS_COUNT):\n for y in range(ROWS_COUNT):\n tile_type, sprite = choice(self.available_tiles), None\n tile_grid[x, y] = tile_type, sprite\n if len(self.get_same_type_lines(tile_grid)) == 0:\n break\n tile_grid = {}\n\n # Build the sprites based on the assigned tile type\n for key, value in tile_grid.items():\n tile_type, sprite = value\n sprite = self.tile_sprite(tile_type, self.to_display(key))\n tile_grid[key] = tile_type, sprite\n self.view.add(sprite)\n\n self.tile_grid = tile_grid", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self.grid[row][col]", "def __init__(self, world, location, elevation):\n LandCell.__init__(self, world, location, elevation)", "def __draw(self, state:dict):\n _, ax = plt.subplots()\n ax.set_axis_off()\n tb = Table(ax, bbox=[0,0,1,1])\n\n width = height = 1.0 /9 \n\n\n for key in self.state.keys():\n # Add cells\n i,j = self.__display_table_map[key]\n tb.add_cell(i, j, width, height, text='{}'.format(state[key]), \n loc='center',facecolor= self.__color_map[key])\n\n ax.add_table(tb)\n plt.show()", "def __init__(self, i, j):\n pygame.sprite.Sprite.__init__(self)\n #self.image = pygame.Surface([30,30])\n #self.image.fill(self.floorColor)\n self.image = pygame.image.load('dungeon_floor.png').convert_alpha()\n self.pos = (i*30,j*30,)\n self.rect = pygame.Rect(i*30, j*30, 30, 30)\n self._layer = 2", "def tile(self, z, x, y):\n logger.debug(_(\"Render tile %s\") % ((z, x, y),))\n mercator = GlobalMercator(False,tilesize,[z])\n return self.render(mercator.tile_bbox((z, x, y)))", "def set_tile(self, row, col, value):\r\n 
del self.board[row][col]\r\n self.board[row].insert(col,value)\r\n return self.board", "def __init__(self):\n\n self._length = 8\n self.board = []\n self.columns = \"ABCDEFGH\"\n for colNum in range(0, self._length):\n self.board.append([])\n for rowNum in range(0, self._length):\n self.board[colNum].append(Tile(colNum, rowNum))\n\n self.board[3][3].color = \"blue\"\n self.board[3][4].color = \"red\"\n self.board[4][3].color = \"red\"\n self.board[4][4].color = \"blue\"", "def create_image(self, image_location, width, height):\n tile_image = pygame.image.load(image_location).convert_alpha()\n # The tile is a square and the height is expected to be smaller than the width\n tile_width = width\n tile_height = height\n tile_image = pygame.transform.scale(tile_image, (tile_width, tile_height))\n\n # The self.image attribute expects a Surface, so we can manually create one and \"blit\" the tile image onto the surface (i.e. paint an image onto a surface).\n # We use list comprehension to quickly make the blits_data list of tuples (each tuple has the tile image, and the X and Y coordinates)\n # Don't know what list comprehension is? Go look it up on the Internet. That's what all professional software engineers do ;)\n image = pygame.Surface((width, height))\n blits_data = [(tile_image, (tile_width * i, 0)) for i in range(math.ceil(width / tile_width))]\n image.blits(blits_data)\n\n return image", "def genNewTile(self):\n # Find which tiles are empty\n emptyList = []\n for i in range(4):\n for j in range(4):\n tileKey = 4*i + j\n if self.isTileEmpty(i, j):\n emptyList.append(tileKey)\n\n # If there's no empty tiles, return false\n if len(emptyList) == 0:\n return False\n\n # Choose tile from empty tiles and fill in 2 or 4.\n newTileKey = choice(emptyList)\n iNew = newTileKey//4\n jNew = newTileKey%4\n if random() > PROB_2:\n self.tiles[iNew][jNew] = 4\n else:\n self.tiles[iNew][jNew] = 2\n return True", "def load_tile_table(filename, width, height):\n\ttry: \n\t\ttile_table = []\n\t\timage = pygame.image.load(filename).convert()\n\texcept:\n\t\tprint(\"Could not load tileset:\", filename)\n\telse:\n\t\timage_width, image_height = image.get_size()\n\t\tfor tile_x in range(0, int(image_width/width)):\n\t\t\tline = []\n\t\t\ttile_table.append(line)\n\t\t\tfor tile_y in range(0, int(image_height/height)):\n\t\t\t\trect = (tile_x*width, tile_y*height, width, height)\n\t\t\t\tline.append(image.subsurface(rect))\n\treturn tile_table", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid_2048[row][col] = value" ]
[ "0.6234986", "0.6104286", "0.60611725", "0.5864138", "0.5863451", "0.58588135", "0.58218133", "0.5803234", "0.5781789", "0.5778895", "0.57396734", "0.57103294", "0.56988585", "0.56791496", "0.565901", "0.5655893", "0.56288236", "0.5625432", "0.5617881", "0.5593936", "0.5587873", "0.5575849", "0.5538812", "0.5526582", "0.552183", "0.5513268", "0.55072916", "0.55034", "0.54992497", "0.5495162", "0.5489455", "0.54816204", "0.54663277", "0.5458732", "0.545235", "0.5447053", "0.536866", "0.5358158", "0.5353058", "0.53404945", "0.53362244", "0.5334523", "0.5324611", "0.5312639", "0.53077036", "0.53035694", "0.5300368", "0.5284249", "0.5281286", "0.5278882", "0.5276923", "0.52528834", "0.52509165", "0.52475363", "0.5247277", "0.5243356", "0.52415866", "0.523768", "0.52307045", "0.52281153", "0.5227937", "0.5221186", "0.52158326", "0.52050155", "0.5204324", "0.5202327", "0.5202231", "0.52022046", "0.5201942", "0.51927954", "0.51890963", "0.517865", "0.517865", "0.5174082", "0.5171378", "0.51692665", "0.5167747", "0.5164551", "0.515979", "0.51440376", "0.51343954", "0.51335675", "0.5105603", "0.51053196", "0.51046664", "0.51046664", "0.51038253", "0.5102065", "0.5097432", "0.5096493", "0.509603", "0.5093575", "0.5092803", "0.5085435", "0.50780886", "0.507631", "0.5075819", "0.5075664", "0.5075565", "0.5073124" ]
0.55911297
20
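For context, a minimal self-contained sketch of the tile-grid pattern (set_tile / get_tile / new_tile) that the negative snippets above keep re-implementing — the class name, the grid-size handling, and the 90%/10% two-versus-four split are assumptions made for this illustration only, not taken from any single snippet:

import random

class TileGrid:
    # Minimal grid exposing the set_tile/get_tile/new_tile trio seen in the snippets above.
    def __init__(self, height, width):
        self.height = height
        self.width = width
        self.cells = [[0] * width for _ in range(height)]

    def get_tile(self, row, col):
        return self.cells[row][col]

    def set_tile(self, row, col, value):
        self.cells[row][col] = value

    def new_tile(self):
        # Place a 2 (90% of the time) or a 4 (10%) on a randomly chosen empty cell.
        empty = [(r, c) for r in range(self.height)
                 for c in range(self.width) if self.cells[r][c] == 0]
        if empty:
            row, col = random.choice(empty)
            self.set_tile(row, col, 4 if random.random() < 0.1 else 2)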
Method that checks if a file at the provided URL exists.
def file_exist(file_url):
    try:
        response = requests.head(file_url)
        if 200 <= response.status_code < 300:
            return True
        return False
    except ConnectionError:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _url_exists(self, url):\n return url_exists(url)", "def exists(self, url):\n return (self.base_path / url).exists()", "def url_exists(url):\n # Check for URLs we can't validate\n if url.startswith(\"https://kiwiirc.com\"):\n return True\n if url.startswith(\"https://www.projectcalico.org\"):\n return True\n\n try:\n urllib2.urlopen(url)\n return True\n except urllib2.HTTPError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False\n except urllib2.URLError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False", "def _url_exists(url):\n h = httplib2.Http()\n try:\n resp = h.request(url, 'HEAD')\n if resp[0].status == 200:\n return True\n except (httplib2.RelativeURIError, httplib2.ServerNotFoundError):\n return False", "def url_exists(url):\n\n try:\n connection = urlopen(url)\n return connection.getcode() < 400\n except Exception as e:\n return False", "def download_if_not_exists(filename, url):\n if not os.path.exists(filename):\n download_file(filename, url)\n return True\n return False", "def download_if_not_exists(filename, url):\n if not os.path.exists(filename):\n download_file(filename, url)\n return True\n return False", "def exists(self, url):\n url = urlparse.urlparse(url)\n connection = httplib.HTTPConnection(url.hostname, timeout=CONNECTION_TIMEOUT)\n \n try:\n connection.request(\"HEAD\", url.geturl())\n response = connection.getresponse()\n except:\n return False\n \n if str(response.status)[0] not in [\"2\", \"3\"]:\n return False\n \n connection.close()\n return True", "def url_exist(url:str) -> bool:\r\n with closing(requests.head(url, allow_redirects=True)) as r:\r\n return r.ok", "def url_exists(url):\r\n from urllib import parse\r\n res = parse.urlparse(url)\r\n if res.scheme == 'gs':\r\n # blob_name has no '/' prefix\r\n bucket_name, blob_name = res.netloc, res.path[1:]\r\n from google.cloud import storage\r\n storage_client = storage.Client()\r\n bucket = storage_client.get_bucket(bucket_name)\r\n blob = bucket.blob(blob_name)\r\n return blob.exists()\r\n else:\r\n return os.path.exists(res.path)", "def check_remote_file_exists(url, login=None, password=None):\r\n credentials = None\r\n if login and password:\r\n credentials = login, password\r\n\r\n response = requests.get(url,\r\n stream=True,\r\n verify=False,\r\n auth=credentials)\r\n if response.status_code >= 400 or response.status_code < 200:\r\n raise Exception('Returned wrong status code: {}'.format(response.status_code))\r\n\r\n response.close()", "def check_if_exist(self,url):\r\n\t\t\"\"\" verefier si un lien existe \"\"\"\r\n\t\trequest = mechanize.Request(url)\r\n\t\tBAD_REQ = [400,401,404]\r\n\t\ttry :\r\n\t\t\tresponse = mechanize.urlopen(request)\r\n\t\t\tif response.code in BAD_REQ:\r\n\t\t\t\treturn False\r\n\t\t\telse:\r\n\t\t\t\treturn True\r\n\t\texcept urllib2.HTTPError, error:\r\n\t\t\tif error.code in BAD_REQ:\r\n\t\t\t\treturn False\r\n\t\t\telse:\r\n\t\t\t\treturn True", "def is_file_exists(self):\n pass", "def exists(self, path):\n\n # First test for local path\n if os.path.exists(path):\n return True\n\n # We import this here because importing urllib is slow and\n # a significant fraction of numpy's total import time.\n from urllib.request import urlopen\n from urllib.error import URLError\n\n # Test cached url\n upath = self.abspath(path)\n if os.path.exists(upath):\n return True\n\n # Test remote url\n if self._isurl(path):\n try:\n netfile = urlopen(path)\n netfile.close()\n del(netfile)\n return True\n except URLError:\n return False\n 
return False", "def check_file_exist(self):\n return False", "def file_url(self, url):\n return self.is_regex_url(url, self.is_file_regex)", "def url_check(url):\n try:\n request = urllib.request.Request(url)\n request.get_method = lambda: 'HEAD'\n urllib.request.urlopen(request)\n return True\n \n except ValueError:\n return False\n\n except urllib.request.HTTPError:\n return False\n \n except URLError:\n return False", "def path_exists(path):\n if path.startswith('http://') or path.startswith('https://'):\n return True\n\n return isfile(path)", "def file_exist() -> bool:\n pass", "def _verify_url_exists(url, use_head=False):\n # (str, bool) -> bool\n try:\n if use_head:\n resp = requests.head(url)\n else:\n resp = requests.get(url)\n except requests.exceptions.ConnectionError:\n return False\n\n return resp.status_code in [200, 302]", "def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']", "def file_exists(filename: str):\n if osp.exists(filename) is True:\n return True\n else:\n return False", "def check_file_existence(self, filename):\n try:\n for sample in TimeoutingSampler(\n config.GAHOOKS_TIMEOUT, 1, self.machine.fs.exists,\n \"/tmp/%s\" % filename\n ):\n if sample:\n return True\n except APITimeout:\n return False", "def exists_filing(dir, url, length):\n\tfilepath = os.path.join(dir,url.split('/')[-1])\n\treturn os.path.exists(filepath) and (length is None or os.path.getsize(filepath) == length)", "def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! status_code: \" + r\n return(False)", "def does_file_exist(self, fn):\n if True:\n print(f\"-=- {fn} found.\")\n return True\n else:\n print(f\"-!- {fn} not found. 
Try again\")\n return False", "def url_checker(url_str):\n file_msg = fd.Program_Msg(__file__)\n ## Checking input parameters\n if not (isinstance(url_str, str)):\n msg = '{0} `url_str` ({1}) is not a STRING!'.format(file_msg,\n type(url_str))\n raise LSSUtils_Error(msg)\n ##\n ## Checking Website\n request_url = requests.get(url_str)\n if (request_url.status_code != 200):\n msg = '{0} `url_str` ({1}) does not exist!'.format(file_msg, url_str)\n raise LSSUtils_Error(msg)", "def file_exists(self):\r\n if os.path.exists(self.file_path):\r\n return True\r\n else:\r\n return False", "def check_url(url):\n return get_svninfo(url) != {}", "def check_url(url):\n return 'products.json' in url", "def _does_file_exist(file_path):\n return os.path.exists(file_path) and os.path.getsize(file_path) > 0", "def file_exists(filename):\n return os.path.exists(filename)", "def exists(self, _uri):\n #print(\"%s %s\"%(_uri))\n\n\n #-------------------- \n # Query logged files before checking\n #-------------------- \n if (os.path.basename(_uri) in self.fileDict):\n return True\n\n\n\n #-------------------- \n # Clean string\n #-------------------- \n xnatUrl = Xnat.path.makeXnatUrl(self.host, _uri)\n parentDir = Xnat.path.getUriAt(xnatUrl, 'files')\n for i in self.__getJson(parentDir):\n if os.path.basename(xnatUrl) in i['Name']:\n return True \n return False", "def check_if_downloaded( url, debug_print = True ):\n\t# Get pdf filename\n\tfilename = basename( url )\n\tfileno, ext_pdf = splitext( filename )\n\tfor file in listdir( getcwd() ):\n\t\tif fileno in file:\n\t\t\tif debug_print:\n\t\t\t\tprint 'Skipping %s' % ( filename )\n\t\t\treturn True\n\treturn False", "def file_exists(path):\n\n try:\n with open(path):\n return True\n except IOError:\n return False", "def blob_exists(blob_url):\n blob = storage.Object.from_url(blob_url)\n blobex = blob.exists()\n return blobex", "def download_if_needed(url, filename):\n if os.path.exists(filename):\n print \"already exists\"\n else:\n wget.download(url)", "def _get_file(cls, url: str, ende: str) -> bool:\n resposta = requests.get(url)\n if resposta.status_code == requests.codes.OK:\n with open(ende, 'wb') as novo_arquivo:\n novo_arquivo.write(resposta.content)\n return True\n else:\n resposta.raise_for_status()\n return False", "def check_if_file_exists(path):\n\n return os.path.exists(path)", "def exists(path):\n r = requests.head(path)\n # print(r.status_code)\n return r.status_code == requests.codes.ok", "def _check_source (fileurl, path_unzip, outfile) :\n if outfile is not None and os.path.splitext (outfile)[1].lower () == os.path.splitext (fileurl)[1].lower () :\n file = _check_url_file (fileurl, path_download = path_unzip, outfile = outfile)\n return file\n else :\n file = _check_url_file (fileurl, path_download = path_unzip, outfile = None)\n txt = _check_zip_file (file, path_unzip = path_unzip, outfile = outfile)\n if not os.path.exists (txt):\n message = \"hal_core._check_source: unable to find file \" + txt + \" source (\" + fileurl + \")\"\n raise PQHException (message)\n return txt", "def validate_url(url):\n response, content = get_response_from_file(url)\n\n if response == None and content == None:\n response, content = get_response_and_content(url)\n\n if response == None:\n return url, url, 0, \"\", \"N\", \"N\", \"N\", hit(\"No Response\"), \"false\"\n else:\n #print(url, get_visible_text(content))\n return evaluate_content_for_200s(response, url, content)", "def file_exists(cls, path: Path) -> bool:\n return path.exists()", "def 
file_exists(path):\n if path.startswith('gs://'):\n return gcsio.GcsIO().exists(path)\n else:\n return os.path.exists(path)", "def file_exists(path):\n return os.path.exists(path)", "def check_file(filename: str):\n if os.path.isfile(filename):\n return True\n else:\n raise FileExistsError", "def _file_exists(name):\n try:\n f = open(name)\n f.close()\n return True\n except IOError:\n return False", "def FileExists(file):\n return os.path.exists(file)", "def url_completed(self, url):\n\t\tfor e in self._all_eles(): #!cover\n\t\t\tassert 'files' in e\n\t\t\tif url in e['files']:\n\t\t\t\texists = (e['files'][url] is not None and e['files'][url] is not False)\n\t\t\t\treturn exists, e['files'][url]\n\t\treturn False, None", "def file_exists(self):\n if os.path.isfile(self.file_name):\n return True\n else:\n return False", "def exists(request, pagename, filename):\n fpath = getFilename(request, pagename, filename)\n return os.path.exists(fpath)", "def has_file(path):\n return os.path.exists(path)", "def file_exists(file_path):\n\n return Path(file_path).is_file()", "def resource_exists(uri: Optional[str]) -> bool:\n\n if uri is None:\n return True\n\n # TODO Replace after finding way to pass custom fs through FireO validator\n if uri.startswith(\"gs://\"):\n return True\n\n else:\n # Get file system\n fs, uri = url_to_fs(uri)\n\n # Check exists\n if fs.exists(uri):\n return True\n\n return False", "def file_exists(filename):\n return os.path.isfile(filename)", "def FileCheck(fn):\n try:\n open(fn, \"r\")\n return 1\n except IOError:\n print(\"Error: File does not exist.\")\n return 0", "def file_exists(file_path):\r\n return exists(file_path) and isfile(file_path)", "def file_exists(filename: str) -> bool:\n\n return os.path.exists(filename)", "def file_exists(file_path):\n\n if file_path is None:\n return False\n\n if not os.path.isfile(file_path):\n return False\n\n return True", "def file_exists(file_path):\n\n if file_path is None:\n return False\n\n if not os.path.isfile(file_path):\n return False\n\n return True", "def _check_file_exists(self, filepath, should_exist=True):\n _, _, stderr = self.execute_command(CommandBuilder.list(filepath))\n compare = operator.ne if should_exist else operator.eq\n if compare(len(stderr.readlines()), 0):\n msg = \"not found\" if should_exist else \"already exists\"\n raise OSError(f\"{filepath} {msg} on server\")", "def exists_file(f):\n if os.path.exists(f):\n return True\n return False", "def url_was_found(url=\"localhost:5000/health\"):\n res = requests.get(url).json()\n\n if res['status_code'] == 200:\n return True\n elif res['status_code'] == 404:\n return False\n else:\n raise UnexpectedResponseError(\"Expected 200 OK or 404, got {}.\\n\".format(res['status']), \"Full response : {}\".format(res))", "def file_downloaded(filename):\n fc = pathlib.Path(filename)\n if fc.is_file():\n return True\n else:\n return False", "def isFileExist(file_name):\n return os.path.exists(file_name)", "def exist(self):\n return self.file_path.exists()", "def url_is_alive(url):\n request = urllib.request.Request(url)\n request.get_method = lambda: 'HEAD'\n try:\n urllib.request.urlopen(request)\n return True\n except urllib.request.HTTPError:\n return False\n except urllib.error.URLError:\n return False", "def test_github_file_exists(self):\n for h in self.hyperlinks:\n if h['url'].startswith('https://github.com/cyberbotics/webots/tree/released'):\n path = h['url'].replace('https://github.com/cyberbotics/webots/tree/released',\n 
os.path.normpath(os.environ['WEBOTS_HOME']))\n self.assertTrue(\n os.path.isfile(path) or os.path.isdir(path),\n msg='Hyperlink \"%s\" is pointing to a non-existing file or directory \"%s\" (in file \"%s\").' %\n (h['md'], path, h['file'])\n )", "def fits_file_exists (filepath):\n return validate_file_path(filepath, FITS_EXTENTS)", "def file_exists(host, fqpath):\n command = \"ls -ld %s\" % fqpath\n rcode, _, rerr = g.run(host, command)\n if rcode == 0:\n return True\n\n g.log.error('File does not exist: %s', rerr)\n return False", "async def has_url(self, url: StrOrURL) -> bool:\n key = self.create_key('GET', url)\n return await self.responses.contains(str(key)) or await self.redirects.contains(str(key))", "def file_exists(file_ref, config):\n find_fn = _find_file(config)\n if _is_remote(file_ref):\n _, file_ref = _get_id_fname(file_ref)\n return find_fn(file_ref)", "def check_url(url=None, parse_url=None):\n return False", "def exists(self, path):", "def if_file_exist(file_name: str) -> bool:\n\n exists = os.path.exists(file_name)\n\n return exists", "def path_exist(filepath):\n\treturn os.path.exists(os.path.basename(filepath))", "def file_exist(file_path):\n return os.path.isfile(file_path)", "def req_CHECKURL(self, url):\n # TODO: what about those MULTI and list to be returned?\n # should we return all filenames or keys within archive?\n # might be way too many?\n # only if just archive portion of url is given or the one pointing\n # to specific file?\n lgr.debug(\"Current directory: %s, url: %s\" % (os.getcwd(), url))\n akey, afile, attrs = self._parse_url(url)\n size = attrs.get('size', None)\n\n # But reply that present only if archive is present\n # TODO: this would throw exception if not present, so this statement is kinda bogus\n akey_fpath = self.get_contentlocation(akey) #, relative_to_top=True))\n if akey_fpath:\n akey_path = opj(self.path, akey_fpath)\n\n # if for testing we want to force getting the archive extracted\n # _ = self.cache.assure_extracted(self._get_key_path(akey)) # TEMP\n efile = self.cache[akey_path].get_extracted_filename(afile)\n\n if size is None and exists(efile):\n size = os.stat(efile).st_size\n\n if size is None:\n size = 'UNKNOWN'\n\n # FIXME: providing filename causes annex to not even talk to ask\n # upon drop :-/\n self.send(\"CHECKURL-CONTENTS\", size) #, basename(afile))\n\n # so it was a good successful one -- record\n self._last_url = url\n else:\n # TODO: theoretically we should first check if key is available from\n # any remote to know if file is available\n self.send(\"CHECKURL-FAILURE\")", "def is_file(self):\n\n url_path = self.url.split('/')\n if re.match(r\".+\\.\\w+\", url_path[-1]):\n # Find <file_name>.<extension>\n return True\n return False", "def fileExists(fileName):\n try:\n fileOpen = open(fileName, 'rt')\n fileOpen.close()\n except FileNotFoundError:\n return False\n else:\n return True", "def file_checker(file_name):\n if os.path.islink(file_name):\n print \"Crypto device Symlink %s exists\" % file_name\n return True\n else: \n try:\n with open(file_name):\n print \"File %s exists\" % file_name\n return True\n except IOError:\n print \"File %s does not exists\" % file_name\n return False", "def fileExist(file):\r\n return os.path.exists(file) and os.path.isfile(file)", "def file_exists(path: str) -> bool:\n\treturn os.path.isfile(path)", "def exists_adv(path):\n # TODO: use selenium\n r = requests.head(path)\n # print(r.status_code)\n return r.status_code == requests.codes.ok", "def checkFile(filename):\n\n\tfileRepo = 
repertoire + filename + extension # Position du fichier\n\n\ttry: # Essaye d'ouvir en lecture\n\t\ttest = open(fileRepo, \"r\")\n\texcept: # Si on arrive pas a ouvrir le fichier\n\t\treturn 0 # Indique le fichier non existant\n\telse: # Sinon, si le fichier s'est ouvert\n\t\ttest.close() # S'assure de fermer le fichier\n\t\treturn 1 # Indique que le fichier existe", "def is_valid_url(url: str) -> bool:\n try:\n requests.get(url)\n except requests.exceptions.RequestException:\n return False\n return True", "def check_url(url):\n # see also http://stackoverflow.com/questions/2924422\n good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]\n return get_server_status_code(url) in good_codes", "def file_check(file_location):\n return Path(file_location).is_file()", "def _check_file_exists(self, filename):\n if not os.path.exists(filename):\n print('\\n[-] ERROR: %s is not at the specified path! \\\n Please check the filepath and filename...' \n %filename)\n return False\n return True", "def assure_exists(self, name: str):\n result = self.l2.exists(name)\n if result:\n logging.debug(f'{name} l2 hit')\n return self.l2.get_path(name)\n\n self.l3.download(name, self.l2.get_path(name))\n result = self.l2.exists(name)\n if not result:\n raise Exception('file not found anywhere')\n else:\n logging.debug(f'{name} l3 hit')\n return self.l2.get_path(name)", "def download_if_stale(filepath, fileurl):\n if not os.path.exists(filepath) or needs_refreshing(filepath):\n try:\n urllib.request.urlretrieve(fileurl, filepath)\n except urllib.error.HTTPError:\n print('The {0} is not reachable'.format(fileurl))", "def _existFile(f):\n\treturn os.path.isfile(f)", "def test_url(quartus, part, url):\n print(\"\\rChecking %s/%s \" % (quartus, part), end='')\n try:\n response = urllib.request.urlopen(url)\n headers = response.getheaders()\n return True\n except KeyboardInterrupt:\n sys.exit(1)\n except:\n return False", "def check_for_file(self):\n if self.task.file_name in os.listdir(self.task.file_storage):\n return True\n return False", "def file_exists(path: str) -> bool:\n return os.path.isfile(path)", "def check_url(url):\n good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]\n return get_server_status_code(url) in good_codes", "def file_exists(filepath):\n\n return os.path.exists(filepath) and os.path.isfile(filepath)", "def _download_if_needed(file_path, url, show_progress):\n if file_path.exists() and not file_path.is_file():\n raise NotAFileError(file_path)\n elif not file_path.exists():\n get_logger().info('Downloading %s ...', file_path)\n reporthook = None\n if show_progress:\n reporthook = _UrlRetrieveReportHook()\n urllib.request.urlretrieve(url, str(file_path), reporthook=reporthook)\n if show_progress:\n print()\n else:\n get_logger().info('%s already exists. Skipping download.', file_path)", "def file_exist(self, file_id):\n filename = path.join(\n self._ext_config['dirresults'],\n \"{0}.{1}\".format(file_id, self.type_file)\n )\n if path.exists(filename):\n return True\n else:\n return False", "def fileExist(file):\n return os.path.exists(file) and os.path.isfile(file)" ]
[ "0.7852359", "0.7844551", "0.77893347", "0.77855074", "0.7775254", "0.7766845", "0.7766845", "0.76954746", "0.76517665", "0.75399566", "0.7395437", "0.7388874", "0.7315557", "0.72969633", "0.72895604", "0.7279086", "0.7263453", "0.7241822", "0.71523726", "0.7136986", "0.7090065", "0.70799917", "0.70388234", "0.6976379", "0.6958026", "0.69191253", "0.6865299", "0.6863917", "0.6855283", "0.68398225", "0.6819757", "0.6818934", "0.6802974", "0.67990726", "0.6782966", "0.6767521", "0.6758848", "0.675343", "0.6749936", "0.6730323", "0.67225415", "0.67173654", "0.6713206", "0.6711794", "0.6711355", "0.6710982", "0.6709023", "0.66963935", "0.6684052", "0.66654575", "0.6662648", "0.6649588", "0.66362745", "0.6632649", "0.6625325", "0.6615351", "0.6597854", "0.65963686", "0.65951496", "0.65951496", "0.65766066", "0.6572788", "0.65674835", "0.6551811", "0.65510803", "0.6549915", "0.6549303", "0.65474993", "0.65236497", "0.65200126", "0.6517137", "0.6511534", "0.6502069", "0.6499279", "0.6499015", "0.6488473", "0.6486006", "0.64731807", "0.64598954", "0.6457264", "0.6455261", "0.6454099", "0.64487195", "0.644419", "0.64409876", "0.64377254", "0.6425033", "0.6424913", "0.64195997", "0.6411009", "0.64045095", "0.6403859", "0.63960195", "0.6389509", "0.6389318", "0.63807255", "0.637007", "0.63657266", "0.6363661", "0.6362381" ]
0.84588355
0
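For context, a minimal self-contained sketch of the HEAD-request existence check that the file_exist document above performs — the function name, the timeout value, and catching requests.exceptions.RequestException (rather than the builtin ConnectionError) are assumptions made for this illustration:

import requests

def url_file_exists(file_url, timeout=5.0):
    # Issue a HEAD request and treat any 2xx answer as "the file exists".
    try:
        response = requests.head(file_url, allow_redirects=True, timeout=timeout)
        return 200 <= response.status_code < 300
    except requests.exceptions.RequestException:
        # Connection errors, timeouts and malformed URLs all count as "not found".
        return False

print(url_file_exists("https://example.com/index.html"))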
Method that, based on the file URL, returns the appropriate hash.
def get_hash(file_url):
    file_extension = os.path.splitext(file_url)[1]
    return str(HASHES.get(file_extension))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_hash(self, filepath):\n if (os.path.isfile(filepath) and not (\n os.path.islink(filepath) and self.ignorelinks)):\n file_hash = self.hashfile(open(filepath, 'rb'))\n else:\n file_hash = self.hashstring(filepath)\n if not self._increment_hash:\n self._increment_hash = file_hash\n else:\n self._increment_hash = self.hashstring(\n self._increment_hash + file_hash)\n return file_hash", "def get_hash(self, url, hash_type):\n hasher = StreamHasher(chunk_size=self.multipart_chunksize, hashes=[hash_type])\n path = self.base_path / url\n if not path.exists():\n return None\n with path.open(\"rb\", self.CHUNK_SIZE) as f:\n hasher.compute(f)\n return hasher.hexdigest(hash_type)", "def static_file_hash(filepath):\n hasher = hashlib.md5() # nosec: B303\n\n with contextlib.closing(open(filepath, 'rb')) as file:\n hasher.update(file.read())\n return hasher.hexdigest()", "def _actual_hash(self):\n return hash_of_file(join(self._temp_path, self._downloaded_filename()))", "def calc_file_hash(filepath):\n with open(filepath, 'rb') as f:\n return md5(f.read()).hexdigest()", "def hash_from_file(file_path):\r\n return hash_from_code(open(file_path, 'rb').read())", "def _get_hash(self, path):\n with open(path, \"r\") as fp:\n content = fp.read()\n\n return sha256(content).hexdigest()", "def getHashFile(file):\n try:\n fileContent = open(file, 'rb').read()\n except:\n raise IOError, \"No such file...\"\n return False\n return getHash(fileContent)", "def hashfile(file):\n\n hasher = hashlib.sha256()\n\n with open(file, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n hasher.update(buf)\n\n return(hasher.hexdigest())", "def get_file_hash(self, filepath):\n if filepath not in self._file_hash_cache:\n self._file_hash_cache[filepath] = self.static_file_hash(filepath)\n return self._file_hash_cache[filepath]", "def get_file_hash(file_path):\n with open(file_path, 'rb') as f:\n file_name = os.path.basename(file_path)\n to_hash = f.read() + file_name.encode('utf-8')\n new_hash = hashlib.md5(to_hash).hexdigest()\n return new_hash", "def _hash_file_content(self, path):\n hasher = hashlib.sha1()\n with open(path, 'rb') as file:\n buffer = file.read(self.hash_block_size)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = file.read(self.hash_block_size)\n return hasher.hexdigest()", "def hash_file(path: str) -> str:\n return _hash_file(path, hashlib.md5()).hexdigest()", "def compute_hash(fileName):\n m = hashlib.sha1()\n try:\n fd = open(fileName,\"rb\")\n except IOError:\n print (\"Unable to open the file in readmode:\", fileName)\n return\n content = fd.readlines()\n fd.close()\n for eachLine in content:\n m.update(eachLine)\n return m.hexdigest()", "def calculate_hash(filename, raise_on_not_found = False):\n if not is_file(filename) and not raise_on_not_found:\n return \"NOTFOUND\"\n\n with open(filename, \"rb\") as file:\n sha256 = hashlib.sha256()\n buf = file.read(128)\n while len(buf) > 0:\n sha256.update(buf)\n buf = file.read(128)\n return str(binascii.hexlify(sha256.digest()), \"utf8\")", "def hash_file(method, path):\n f = open(path, \"rb\")\n h = method()\n while True:\n buf = f.read(BUFSIZE)\n if not buf:\n break\n h.update(buf)\n return h.hexdigest()", "def hash_file(filename):\r\n\r\n # make a hash object\r\n h = hashlib.sha1()\r\n\r\n # open file for reading in binary mode\r\n with open(filename,'rb') as file:\r\n\r\n # loop till the end of the file\r\n chunk = 0\r\n while chunk != b'':\r\n # read only 1024 bytes at a time\r\n chunk = file.read(1024)\r\n h.update(chunk)\r\n\r\n # return the 
hex representation of digest\r\n return h.hexdigest()", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def get_file_hash (fullpath) : \n\n # This bit was sourced from Stack Overflow via Google, specifically:\n # http://stackoverflow.com/questions/1131220/get-md5-hash-of-a-files-without-open-it-in-python\n\n md5 = hashlib.md5()\n with open(fullpath,'rb') as f: \n for chunk in iter(lambda: f.read(512*md5.block_size), ''): \n md5.update(chunk)\n # Hexdigest is the safe varchar(32) style output\n return md5.hexdigest()", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def hash_file(file_path, hash_type=hashlib.sha256, binary=False, buffer_size=65536):\n hash_func = hash_type()\n with open(file_path, \"rb\") as file:\n while True:\n data = file.read(buffer_size)\n if not data:\n break\n hash_func.update(data)\n return hash_func.digest() if binary else hash_func.hexdigest()", "def hashFile(path: str) -> str:\n\tif not os.path.exists(path):\n\t\traise FileNotFoundError\n\n\thasher = hashlib.sha1()\n\tblock_sz = 8192\n\twith open(path, 'rb') as f:\n\t\tbuf = f.read(block_sz)\n\t\twhile len(buf) > 0:\n\t\t\thasher.update(buf)\n\t\t\tbuf = f.read(block_sz)\n\treturn str(hasher.hexdigest())", "def get_md5_hash(file_path: str) -> str:\n from hashlib import md5\n\n # local file\n if file_path.startswith('/'):\n return md5(open(file_path, 'rb').read()).hexdigest()\n\n # remote file\n httpresponse = url_is_alive(file_path)\n if not httpresponse:\n error_open_mess(file_path)\n return ''\n\n md5hash = md5()\n max_file_size = 100 * 1024 * 1024\n total_read = 0\n while True:\n data = httpresponse.read(4096)\n total_read += 4096\n\n if not data or total_read > max_file_size:\n break\n\n md5hash.update(data)\n\n httpresponse.close()\n return md5hash.hexdigest()", "def get_hash_from_file(img):\n with open(img, 'rb') as f:\n return hashlib.sha256(f.read()).hexdigest()", "def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha256()\n\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()", "def hash_for_file(file_name, block_size=2 ** 20):\n hasher = SHA256.new()\n source_file = open(file_name, \"r\")\n\n while True:\n data = source_file.read(block_size)\n if not data:\n break\n hasher.update(data.encode('utf-8'))\n\n source_file.close()\n return hasher.hexdigest()", "def 
get_file_hash(afile, hasher, block_size=65536):\n buf = afile.read(block_size)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(block_size)\n return hasher.digest()", "def hash_file(self, filename_or_bytestream):\n\n try:\n for data in self._read_blocks(filename_or_bytestream):\n self._update(data)\n except OSError as e:\n print('digest: ', filename_or_bytestream, ': ', e.strerror, sep='', file=sys.stderr)\n return None\n return self._hexdigests()", "def computeHash(filename):\n fileHash = hashlib.sha256()\n with open(filename, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n fileHash.update(chunk)\n return fileHash.hexdigest()", "def hash_file_at_path(file_path, algorithm=\"sha1\"):\n block_size = 64 * 1024\n hasher = getattr(hashlib, algorithm)()\n with open(file_path, \"rb\") as file_handler:\n while True:\n data = file_handler.read(block_size)\n if not data:\n break\n hasher.update(data)\n return hasher.hexdigest()", "def computeHash(infile):\n f = open(infile, 'rb')\n buffer = f.read()\n f.close()\n return hashlib.sha1(buffer).hexdigest()", "def get_md5_lookup(filename):\n lookup = {}\n\n with open(filename) as f:\n for row in f:\n (md5, sha256) = row.strip().split(\",\")\n lookup[md5] = sha256\n\n return lookup", "def get_hash(file_buffer):\n data = file_buffer.read()\n hasher = sha1()\n hasher.update(data)\n return hasher.hexdigest()", "def hash_file(file_to_hash):\n print(\"Hashing \" + file_to_hash + \"...\")\n hash_algorithm = hashlib.sha256()\n file = open(file_to_hash, 'rb')\n while True:\n contents = file.read(65536)\n if not contents:\n break\n hash_algorithm.update(contents)\n hash_str = hash_algorithm.hexdigest()\n return hash_str", "def hash_file(filepath):\n digest = hashlib.sha1()\n with open(filepath, 'rb') as f:\n while True:\n chunk = f.read(1024*1024)\n if not chunk:\n break\n digest.update(chunk)\n return digest.hexdigest()", "def hash_file(file_name):\n BLOCKSIZE = 65536\n hasher = hashlib.sha1()\n with open(file_name, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n return(hasher.hexdigest())", "def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha1()\n\n # open file for reading in binary mode\n with open(filename, 'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()", "def _hash_file(file: Union[str, Path], md5: Hash) -> Hash:\n if isinstance(file, str) and file.lower().startswith(\"file://\"):\n file = unquote(urlparse(file).path)\n if not Path(file).is_file():\n raise ValueError(str(file) + \" is not a valid file\")\n with open(file, \"rb\") as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n md5.update(data)\n return md5", "def file_digest(file):\n # 'rb' file mode reads the file as bytes\n input_file = open(file, 'rb')\n data = input_file.read()\n # getting the digest\n digest = hash_comparing(data).hexdigest()\n input_file.close()\n return digest", "def file_hash(filepath: Path):\n hsh = hashlib.sha256()\n b = bytearray(128 * 1024)\n mv = memoryview(b)\n with Path(filepath).open(\"rb\", buffering=0) as f:\n for n in iter(lambda: f.readinto(mv), 0):\n hsh.update(mv[:n])\n return hsh.hexdigest()", "def get_file(self) -> tuple:\r\n hash_md5 = hashlib.md5()\r\n with open(self.yara_base_file, \"rb\") as f:\r\n file_map = f.read()\r\n get_file_dict = 
get_matches(self, file_map)\r\n hash_md5.update(file_map)\r\n return hash_md5.hexdigest(), get_file_dict", "def file_hash(load, fnd):\n if \"env\" in load:\n # \"env\" is not supported; Use \"saltenv\".\n load.pop(\"env\")\n\n ret = {}\n\n if \"saltenv\" not in load:\n return ret\n\n if \"path\" not in fnd or \"bucket\" not in fnd or not fnd[\"path\"]:\n return ret\n\n cached_file_path = _get_cached_file_name(\n fnd[\"bucket\"], load[\"saltenv\"], fnd[\"path\"]\n )\n\n if os.path.isfile(cached_file_path):\n ret[\"hsum\"] = salt.utils.hashutils.get_hash(cached_file_path)\n ret[\"hash_type\"] = \"md5\"\n\n return ret", "def hashfile(filename):\n BLOCKSIZE = 65536\n sha1 = hashlib.sha1()\n with open(filename, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n sha1.update(buf)\n buf = afile.read(BLOCKSIZE)\n return(sha1.hexdigest())", "def hash(self) -> str:\n return md5(bytes(self.url, encoding=\"utf8\")).hexdigest()", "def _calculate_hash(self, file_object):\n hasher = self.hashlib()\n for chunk in self.iterchunks(file_object):\n hasher.update(chunk)\n return hasher.hexdigest()", "def _get_target_hash(self, target_filepath, hash_function='sha256'):\n\n # Calculate the hash of the filepath to determine which bin to find the \n # target. The client currently assumes the repository uses\n # 'hash_function' to generate hashes.\n\n digest_object = tuf.hash.digest(hash_function)\n\n try:\n digest_object.update(target_filepath)\n except UnicodeEncodeError:\n # Sometimes, there are Unicode characters in target paths. We assume a\n # UTF-8 encoding and try to hash that.\n digest_object = tuf.hash.digest(hash_function)\n encoded_target_filepath = target_filepath.encode('utf-8')\n digest_object.update(encoded_target_filepath)\n\n target_filepath_hash = digest_object.hexdigest() \n\n return target_filepath_hash", "def test_hash_url(self):\r\n url = u'http://google.com'\r\n hashed = generate_hash(url)\r\n self.assertEqual('aa2239c17609b2', hashed)", "def hashFile(filename):\n\tblocks = []\n\twith open(filename, 'rb') as f:\n\t\tblock = f.read(1024)\n\t\twhile block:\n\t\t\tblocks.append(block)\n\t\t\tblock = f.read(1024)\n\t\n\tprevHash = b''\n\tfor block in reversed(blocks):\n\t\thash = sha256(block + prevHash)\n\t\tprevHash = hash\n\treturn prevHash", "def hash(path):\n\n with open(path, 'r') as file:\n return hashlib.sha1(file.read()).hexdigest()", "def hash_of_file(path):\n with open(path, 'rb') as archive:\n sha = sha256()\n while True:\n data = archive.read(2 ** 20)\n if not data:\n break\n sha.update(data)\n return encoded_hash(sha)", "def fetch_local_hashcode(self, path):\n\t\treturn hashlib.sha256(open(self.config[\"daemon\"][\"rootdir\"] + path, \"rb\").read()).hexdigest()", "def _hash_file(self, file_entry):\n if file_entry is None:\n return None\n\n if file_entry.IsDevice() or file_entry.IsPipe() or file_entry.IsSocket():\n # Ignore devices, FIFOs/pipes and sockets.\n return None\n\n hash_context = hashlib.sha256()\n\n try:\n file_object = file_entry.GetFileObject()\n except IOError as exception:\n logging.warning((\n 'Unable to open path specification:\\n{0:s}'\n 'with error: {1!s}').format(file_entry.path_spec.location, exception))\n return None\n\n if not file_object:\n return None\n\n try:\n data = file_object.read(self._READ_BUFFER_SIZE)\n while data:\n hash_context.update(data)\n data = file_object.read(self._READ_BUFFER_SIZE)\n except IOError as exception:\n logging.warning((\n 'Unable to read from path specification:\\n{0:s}'\n 'with error: 
{1!s}').format(file_entry.path_spec.location, exception))\n return None\n\n return hash_context.hexdigest()", "def hash(self):\n block = 1024 * 1024 * 4 # 4 MB.\n hasher = hashlib.sha256()\n\n with open(self.path, \"rb\") as f:\n while True:\n chunk = f.read(block)\n if not chunk:\n break\n hasher.update(hashlib.sha256(chunk).digest())\n\n digest = hasher.hexdigest()\n pdbox.debug(\"Hash for %s: %s\" % (self.path, digest))\n return digest", "def _hash_file(fpath, algorithm='sha256', chunk_size=65535):\n if (algorithm == 'sha256') or (algorithm == 'auto' and len(hash) == 64):\n hasher = hashlib.sha256()\n else:\n hasher = hashlib.md5()\n\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n\n return hasher.hexdigest()", "def hash_file(fname,bs=M):\n h = hashlib.md5()\n with open(fname,'rb') as f:\n chunk = f.read(bs)\n while chunk:\n h.update(chunk)\n chunk = f.read(bs)\n return h.digest()", "def _hash_file(fpath, algorithm='sha256', chunk_size=65535):\n if (algorithm is 'sha256') or (algorithm is 'auto' and len(hash) is 64):\n hasher = hashlib.sha256()\n else:\n hasher = hashlib.md5()\n\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n\n return hasher.hexdigest()", "def get_hash(link):\n return hashlib.sha256(link.encode('utf-8')).hexdigest()", "def hash_file(path, digest=None):\r\n digest = digest or hashlib.sha1()\r\n with open(path, 'rb') as fd:\r\n s = fd.read(8192)\r\n while s:\r\n digest.update(s)\r\n s = fd.read(8192)\r\n return digest.hexdigest()", "def hashfile(file: str, block_size: int = 65536) -> str:\n with open(file, 'rb') as message:\n m = hashlib.sha256()\n block = message.read(block_size)\n while len(block) > 0:\n m.update(block)\n block = message.read(block_size)\n digest = m.hexdigest()\n\n return digest", "def hash_file_native(file_path, tool=\"sha256sum\"):\n output = subprocess.check_output([tool, file_path], shell=False)\n return output.decode(\"utf-8\").partition(\" \")[0].strip()", "def hash_file_in_zip(zip_handler, file_path_in_zip, algorithm=\"sha1\"):\n block_size = 64 * 1024\n hasher = getattr(hashlib, algorithm)()\n with zip_handler.open(file_path_in_zip, \"r\") as file_handler:\n while True:\n data = file_handler.read(block_size)\n if not data:\n break\n hasher.update(data)\n return hasher.hexdigest()", "def get_file_hash(fname, hash_length):\n hash_sha = hashlib.sha256()\n with open(fname, 'rb') as infile:\n for chunk in iter(lambda: infile.read(4096), b''):\n hash_sha.update(chunk)\n hash_sha = hash_sha.hexdigest()\n hash_sha = int(hash_sha, 16) % (2 ** (4 * hash_length))\n return hex_encode(hash_sha, hash_length)", "def _CalculateDigestHash(self, file_entry, data_stream_name):\n file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n if not file_object:\n return\n\n try:\n file_object.seek(0, os.SEEK_SET)\n\n hasher_object = hashers_manager.HashersManager.GetHasher(u'sha256')\n\n data = file_object.read(self._READ_BUFFER_SIZE)\n while data:\n hasher_object.Update(data)\n data = file_object.read(self._READ_BUFFER_SIZE)\n\n finally:\n file_object.close()\n\n return hasher_object.GetStringDigest()", "def _HashFilename(filename):\n if isinstance(filename, unicode):\n filename = filename.encode(UTF8)\n else:\n filename = unicode(filename, UTF8).encode(UTF8)\n m = hashlib.sha1(filename)\n return 'TRACKER_' + m.hexdigest() + '.' 
+ filename[-16:]", "def _get_file_sha256_hash(file_path):\n sha256hash = hashlib.sha256()\n chunk_size = 8192\n with open(file_path, \"rb\") as f:\n while True:\n buffer = f.read(chunk_size)\n if not buffer:\n break\n sha256hash.update(buffer)\n return sha256hash.hexdigest()", "def _hash_file(fpath, algorithm='sha256', chunk_size=65535):\n if isinstance(algorithm, str):\n hasher = _resolve_hasher(algorithm)\n else:\n hasher = algorithm\n\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n\n return hasher.hexdigest()", "def sha_hash(file_name: str):\n BLOCKSIZE = 65536\n line = '' # format one line for hash\n with open(file_name, 'rb') as afile:\n buf = afile.read(BLOCKSIZE) # read each line of doc\n while len(buf) > 0:\n line += buf.decode('utf-8')\n buf = afile.read(BLOCKSIZE)\n\n hex = \"0x\" + sha1(line.encode()) # create sha1 hash\n return int(hex, 0)", "def get_hashes(self, url, hash_types):\n hasher = StreamHasher(chunk_size=self.multipart_chunksize, hashes=hash_types)\n path = self.base_path / url\n if not path.exists():\n return None\n with path.open(\"rb\", self.CHUNK_SIZE) as f:\n hasher.compute(f)\n return {hash_type: hasher.hexdigest(hash_type) for hash_type in hash_types}", "def hash_file ( filename ):\n sha1 = hashlib.sha1()\n with open( filename, 'rb' ) as f:\n while True:\n buf = f.read(65536) # read by 64kb buffers size\n if not buf:\n break\n sha1.update(buf)\n return sha1", "def get_url(self, hash):\n return self.get(hash=hash)", "def md5_hash(file_path):\n with open(file_path, 'rb') as fp:\n return md5(fp.read()).hexdigest()", "def file_hash(file_to_hash: Path) -> str:\n sha256_hash = hashlib.sha256()\n with file_to_hash.open(\"rb\") as f:\n for block in iter(lambda: f.read(4096), b\"\"):\n sha256_hash.update(block)\n return sha256_hash.hexdigest()", "def _resolve_hasher(algorithm, file_hash=None):\n if algorithm == 'sha256':\n return hashlib.sha256()\n\n if algorithm == 'auto' and file_hash is not None and len(file_hash) == 64:\n return hashlib.sha256()\n\n # This is used only for legacy purposes.\n return hashlib.md5()", "def hashing(file,pp):\n\n def myhash(instring):\n # sdbm hash\n res = 0\n for t in instring:\n res = (ord(t) + (res<<6) + (res<<16) - res) % 2**32\n return res\n\n return hex(myhash(file.replace('\\\\','/')+\":\"+pp))", "def wp_fp(self,url):\r\n\t\ttree = xml.etree.ElementTree.parse(\"doc/wp_versions.xml\")\r\n\t\tp = tree.findall(\"file\")\r\n\t\t#p2 = tree.findall(\"file/hash\")\r\n\t\t#p3 = tree.findall(\"file/hash/version\")\r\n\t\tfor elem in p:\r\n\t\t\ts = elem.getchildren()\r\n\t\t\tsrc = elem.attrib[\"src\"]\r\n\t\t\tcontent = self.get_cont(url+\"/\"+src)\r\n\t\t\tmd5p = md5.new(content).hexdigest()\r\n\t\t\t#print src\r\n\t\t\tfor ele in s:\r\n\t\t\t\tmd5c = ele.attrib[\"md5\"]\r\n\t\t\t\t#print \"[!] 
comparing \"+md5c+\" hash for \"+src+\" : \"+md5p\r\n\t\t\t\tif (md5c == md5p):\r\n\t\t\t\t\tr = ele.getchildren()\r\n\t\t\t\t\treturn r[0].text\r\n\t\t\t#print md5\r", "def semhash(file):\n _hash_helper(file)", "def semhash(file):\n _hash_helper(file)", "def calchash(filename):\n sha = hashlib.sha1()\n with open(filename, 'rb') as f:\n sha.update(f.read())\n return sha", "def get_sha256_file(filename):\n BLOCKSIZE = 65536\n hasher = hashlib.sha256()\n with open(filename, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n return hasher.hexdigest()", "def get_hash(path: Path) -> str:\n m = hashlib.sha256()\n m.update(path.read_bytes())\n return m.hexdigest()", "def hash(cls, path, digest=None, hasher=sha1):\r\n if digest is None:\r\n digest = hasher()\r\n with open(path, 'rb') as fh:\r\n cls.update_hash(fh, digest)\r\n return digest.hexdigest()", "def hash_file(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()", "def hashfile(self, afile):\n # encode_buffer = False\n\n buf = afile.read(self.blocksize)\n while buf:\n # Need to use string-escape for Python 2 non-unicode strings. For\n # Python 2 unicode strings and all Python 3 strings, we need to use\n # unicode-escape. The effect of them is the same.\n if isinstance(buf, str):\n buf = buf.encode('unicode-escape')\n\n self.hasher.update(buf)\n buf = afile.read(self.blocksize)\n return self.hasher.hexdigest()", "def _get_hash(self, *args):\n url_hash = hashlib.sha1()\n try:\n for value in args:\n value = unicode(value).encode('utf-8', 'replace')\n url_hash.update(value)\n return url_hash.hexdigest()\n except UnicodeDecodeError:\n return None", "def get_file_url(self):\n return ('/user-media/addons/3615/delicious_bookmarks-2.1.072-fx.xpi?'\n 'filehash=sha256%3A3808b13ef8341378b9c8305ca648200954ee7dcd8dc'\n 'e09fef55f2673458bc31f')", "def hash_file(file_path):\n\n \n\n def generator(f):\n while True:\n x = f.read(CHUNK_SIZE)\n if x:\n yield x\n else:\n return \n\n with open(file_path, 'rb') as f:\n a = generator(f)\n num_cores = min(multiprocessing.cpu_count(), MAX_CORES)\n hashes = Parallel(n_jobs=num_cores)(delayed(md4_hash)(i) for i in a)\n if len(hashes) == 1:\n return hashes[0].hex()\n else:\n return md4_hash(b\"\".join(hashes)).hex()", "def _get_sha_metadata(filename):\n with open(filename) as f:\n return hashlib.sha1(f.read()).hexdigest()", "def hashfile(fname, blocksize=65536):\n hasher = hashlib.md5()\n afile = open(fname, 'rb')\n buf = afile.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(blocksize)\n return hasher.hexdigest()", "def gdsii_hash(filename, engine=None):\n with open(filename, 'rb') as fin:\n data = fin.read()\n contents = []\n start = pos = 0\n while pos < len(data):\n size, rec = struct.unpack('>HH', data[pos:pos + 4])\n if rec == 0x0502:\n start = pos + 28\n elif rec == 0x0700:\n contents.append(data[start:pos])\n pos += size\n h = hashlib.sha1() if engine is None else engine\n for x in sorted(contents):\n h.update(x)\n return h.hexdigest()", "def get_hash(content):\n return hashlib.sha1(content).hexdigest()", "def get_url_data(url):\n\n # Return data while saving the data in a file \n # which is a hash of the URL\n data = requests.get(url).content\n # Save it in a filename\n filename = hashlib.md5(url.encode(\"utf8\")).hexdigest()\n# open(filename, 'w').write(data)\n with open(filename, \"w\") as 
fileObj:\n fileObj.write(data.decode(\"utf8\"))\n return data", "def calculate_hash(self, include_md: bool = True) -> str:\n # sourcery skip: reintroduce-else, swap-if-else-branches, use-named-expression\n # BUF_SIZE is totally arbitrary,\n BUF_SIZE = 65536 * 16 # lets read stuff in 16 x 64kb chunks!\n\n file_hash = hashlib.sha1()\n # Stubs Only\n files = list((self.package_path).rglob(\"**/*.pyi\"))\n if include_md:\n files += (\n [self.package_path / \"LICENSE.md\"]\n + [self.package_path / \"README.md\"]\n # do not include [self.toml_file]\n )\n for file in sorted(files):\n # TODO: Extract function to allow for retry on file not found\n try:\n with open(file, \"rb\") as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n file_hash.update(data)\n except FileNotFoundError:\n log.warning(f\"File not found {file}\")\n # ignore file not found errors to allow the hash to be created WHILE GIT / VIRUS SCANNERS HOLD LINGERING FILES\n return file_hash.hexdigest()", "def calculate_hash(host, filename):\n # TODO: For testcases specifically testing hashing routine\n # consider using a baseline external Davies-Meyer hash_value.c\n # Creating comparison hash from same library we are testing\n # may not be best practice here. (Holloway)\n try:\n # Check if libglusterfs.so.0 is available locally\n glusterfs = ctypes.cdll.LoadLibrary(\"libglusterfs.so.0\")\n g.log.debug(\"Library libglusterfs.so.0 loaded locally\")\n except OSError:\n conn = g.rpyc_get_connection(host)\n glusterfs = \\\n conn.modules.ctypes.cdll.LoadLibrary(\"libglusterfs.so.0\")\n g.log.debug(\"Library libglusterfs.so.0 loaded via rpyc\")\n\n computed_hash = \\\n ctypes.c_uint32(glusterfs.gf_dm_hashfn(filename, len(filename)))\n # conn.close()\n\n return int(computed_hash.value)", "def hash_file(path):\n if not os.path.isfile(path):\n raise ValueError(\"The given path `{}` is not a file.\".format(path))\n\n md5 = hashlib.md5()\n\n with open(path, 'rb') as file_:\n while True:\n data = file_.read(65536)\n if not data:\n break\n md5.update(data)\n\n return \"{}\".format(md5.hexdigest())", "def quick_hash_file(fname,bs=M):\n size = os.path.getsize(fname)\n if size < 3*bs:\n return hash_file(fname,bs)\n h = hashlib.md5()\n with open(fname,'rb') as f:\n h.update(f.read(bs))\n f.seek(size//2,0)\n h.update(f.read(bs))\n f.seek(-bs,2)\n h.update(f.read(bs))\n return h.digest()", "def _calc_hash(self) -> None:\n self.image = Image.open(self.path)\n self.image = self.image.convert(\"L\")\n self.image = self.image.resize((self.width, self.height), Image.ANTIALIAS)\n lpixels = list(self.image.getdata())\n self.hash = \"0b\"\n for i, pixel in enumerate(lpixels):\n if (i + 1) % self.width == 0 and i != 0:\n continue\n if pixel < lpixels[i + 1]:\n self.hash += \"1\"\n continue\n self.hash += \"0\"\n self.hash_hex = DHash.bin2hex(self.hash)", "def get_hashid_and_urlid(url):\n existing = db.select('id, hashid', 'ImageURLs', 'url LIKE \"%s\"' % clean_url(url))\n if existing:\n urlid = existing[0][0]\n hashid = existing[0][1]\n return hashid, urlid, False\n\n # Download image\n if url.startswith('//'):\n url = 'http:%s' % url\n logger.debug('Downloading %s ...' 
% url)\n try:\n image_buffer = web.download(url)\n except Exception as e:\n logger.debug('Failed')\n raise Exception('Unable to download image at %s: %s' % (url, e))\n\n # Get image hash\n try:\n logger.debug('Hashing ...')\n image = image_from_buffer(image_buffer)\n (width, height) = image.size\n image_hash = str(avhash(image))\n except Exception as e:\n logger.debug('Failed')\n raise e\n logger.debug('Indexing ... ')\n\n # Insert image hash into Hashes table\n hashid = db.insert('Hashes', (None, image_hash))\n if hashid == -1:\n # Already exists, need to lookup existing hash\n hashids = db.select('id', 'Hashes', 'hash = \"%s\"' % (image_hash,))\n if not hashids:\n raise Exception('unable to add hash to table, or find hash (wtf?)')\n hashid = hashids[0][0]\n\n # Image attributes\n try:\n filesize = len(image_buffer)\n url = clean_url(url)\n urlid = db.insert('ImageURLs', (None, url, hashid, width, height, filesize))\n create_thumb(image, urlid)\n logger.debug('Done')\n except Exception as e:\n raise e\n return hashid, urlid, True", "def __hash__(self):\n return hash(self._full_path)", "def hash_file(self, file_path, file_arcname):\n\n file_path = os.path.abspath(file_path)\n\n # If the file_arcname argument is None use the base file name as the\n # arc name\n if file_arcname is None:\n file_arcname = os.path.basename(file_path)\n\n if not os.path.exists(file_path):\n task_error(\"%s doesn't exist\" % file_path)\n if not os.access(file_path, os.R_OK):\n task_error(\"Can't read from %s\" % file_path)\n\n file_mode = os.stat(file_path)[stat.ST_MODE]\n if not stat.S_ISDIR(file_mode) and not stat.S_ISREG(file_mode):\n task_error(\"Unknown file type for %s\" % file_path)\n\n file_in = None\n try:\n # open to read binary. This is important.\n file_in = open(file_path, 'rb')\n except IOError:\n task_error(\"Couldn't read from file: %s\" % file_path)\n\n # hash file 1Mb at a time\n hashval = hashlib.sha1()\n while True:\n data = file_in.read(1024 * 1024)\n if not data:\n break\n hashval.update(data)\n\n # update file bundle status\n\n self.running_size += len(data)\n\n self.percent_complete = 100.0 * self.running_size / self.bundle_size\n\n # only update significant progress\n if self.percent_complete - self.last_percent > 1:\n self.report_percent_complete()\n self.last_percent = self.percent_complete\n\n file_hash = hashval.hexdigest()\n\n # print 'hash: ' + file_hash\n file_in.close()\n\n modified_name = os.path.join('data', file_arcname)\n (file_dir, file_name) = os.path.split(modified_name)\n\n # linuxfy the directory\n file_dir = file_dir.replace('\\\\', '/')\n\n info = {}\n info['size'] = os.path.getsize(file_path)\n mime_type = mimetypes.guess_type(file_path, strict=True)[0]\n\n info['mimetype'] = mime_type if mime_type is not None else 'application/octet-stream'\n info['name'] = file_name\n info['mtime'] = DT.datetime.utcfromtimestamp(int(os.path.getmtime(file_path))).isoformat()\n info['ctime'] = DT.datetime.utcfromtimestamp(int(os.path.getctime(file_path))).isoformat()\n info['destinationTable'] = 'Files'\n info['subdir'] = file_dir\n info['hashsum'] = file_hash\n info['hashtype'] = 'sha1'\n\n # todo make sure errors bubble up without crashing\n if file_arcname in self.file_meta:\n print file_arcname\n task_error(\n \"Different file with the same arcname is already in the bundle\")\n return\n\n return info" ]
[ "0.7292291", "0.72585297", "0.72363", "0.7177594", "0.7159621", "0.70332694", "0.70052016", "0.6999285", "0.69315827", "0.6879491", "0.6835787", "0.6830834", "0.6828008", "0.6765713", "0.67623746", "0.67612416", "0.6758131", "0.6731684", "0.6731684", "0.67308", "0.67212594", "0.671724", "0.670072", "0.6694894", "0.6681219", "0.66709065", "0.66614497", "0.6655756", "0.6652983", "0.6650765", "0.6649222", "0.6632091", "0.6630716", "0.66301847", "0.6622855", "0.6619525", "0.66176754", "0.66069394", "0.65958893", "0.65826166", "0.6562323", "0.6546118", "0.65288067", "0.65267235", "0.6525101", "0.65204704", "0.65126574", "0.6493367", "0.64787835", "0.6465545", "0.6464368", "0.6463509", "0.6458611", "0.64565855", "0.64480054", "0.64330596", "0.6424553", "0.6415155", "0.64130014", "0.63977647", "0.639291", "0.6371144", "0.63574475", "0.63525933", "0.6339991", "0.6301981", "0.6294359", "0.62895095", "0.62855625", "0.6281774", "0.6279994", "0.6274381", "0.6273779", "0.62653077", "0.6259613", "0.62579155", "0.62506497", "0.62506497", "0.6221353", "0.62202734", "0.6215881", "0.62146914", "0.6206753", "0.62017727", "0.6196214", "0.6182657", "0.6175794", "0.6171661", "0.6169833", "0.61696154", "0.6165476", "0.61590654", "0.6154314", "0.6130398", "0.6123409", "0.61097664", "0.6093972", "0.6085897", "0.6084604", "0.60845953" ]
0.8334774
0
Fahrenheit to Celsius conversion. Requests temperature in Fahrenheit degrees and computes temperature in Celsius degrees and prints in Celsius scale.
def f2c_qa_function():
    F = float(input("Provide a Fahrenheit temperature in degrees: "))
    C = 5/9.0*F - 32
    print("The temperatire in Celcius is {:g}".format(C))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fahrenheit_to_celsius():\n fahrenheit = ent_temperature.get()\n celsius = (5 / 9) * (float(fahrenheit) - 32)\n lbl_result[\"text\"] = f\"{round(celsius, 2)} \\N{DEGREE CELSIUS}\"", "def fahrenheit_to_celsius(fahrenheit):\n offset = 32\n multiplier = 5 / 9\n celsius = (fahrenheit - offset) * multiplier\n print(\"inside function:\", fahrenheit, offset, multiplier, celsius)\n return celsius", "def fahrenheit_to_celsius(fahrenheit):\n offset = 32\n multiplier = 5 / 9\n celsius = (fahrenheit - offset) * multiplier\n print(\"inside function:\", fahrenheit, offset, multiplier, celsius)\n return celsius", "def convert_f_to_c(temp_in_farenheit): ## ##\n celsiustemp = round((temp_in_farenheit - 32) * 5/9, 1) ##\n return celsiustemp ##", "def fahrenheit_to_celsius(temp):\n return (temp - 32) * 5/9", "def convertCelsiusToFahrenhe(C):\n if isinstance(C, str) == True:\n raise ValueError(\"Celsius cannot be a string value\")\n if isinstance(C,complex) == True:\n raise ValueError(\"Celsius cannot be a complex value\")\n if isinstance(C,int) == True:\n raise ValueError(\"Celsius should be a float value, example: 90.00\")\n \n F = (9.0/5.0 * C + 32.0)\n return F", "def convert_f_to_c(temp_in_farenheit):\n celcius_temp = round(float((temp_in_farenheit) - 32)*(5/9),1)\n return(celcius_temp)", "def celsius_to_fahrenheit(celsius):\n fahrenheit = (celsius * (9.0/5.0)) + 32.0\n return fahrenheit", "def fahr_to_celsius(temp):\n tempInCel = (temp - 32) * 5/9\n return tempInCel", "def convert_to_celsius(fahrenheit):\n return (fahrenheit - 32) * 5 / 9", "def convert_celsius_to_fahrenheit(celsius):\n return celsius * 9.0 / 5 + 32", "def fahrenheit(celsius):\n return 9 / 5 * celsius + 32", "def toCelsius(farenheit):\r\n return (farenheit - 32)*5 / 9", "def celsius(fahrenheit):\n return 5 / 9 * (fahrenheit - 32)", "def _celsius_to_fahrenheit(self) -> None:\n if self.units == \"celsius\":\n self.value = (((self.value / 5) * 9) + 32).__round__(2)\n self.units = \"fahrenheit\"\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'celsius' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)", "def convert_f_to_c(temp_in_farenheit):\n temp_in_celcius = ((temp_in_farenheit - 32) * 5) / 9\n temp_in_celcius = round(temp_in_celcius, 1)\n return temp_in_celcius", "def convert_to_fahrenheit(self):\n try:\n self.root.ids.celsius_input.hint_text = 'Enter amount in Celsius'\n self.root.ids.fahrenheit_input.text = '{:.2f}'.format(float(self.root.ids.celsius_input.text)\n * 9.0 / 5 + 32)\n except ValueError:\n self.root.ids.celsius_input.text = ''\n self.root.ids.celsius_input.hint_text = 'Invalid number'", "def convert_f_to_c(temp_in_farenheit):\n \n temp=round((float(temp_in_farenheit)-32)*5/9,1)\n \n return (temp)", "def temperature_converter():\n while True:\n # Get the choice from the user\n print(\"Enter choice (1/2):\")\n print(\"1. Fahrenheit to Celsius\")\n print(\"2. 
Celsius to Fahrenheit\")\n user_choice = int(input())\n\n # Based on the option chosen, get temperature input, process and print output\n if user_choice == 1:\n fahrenheit = float(input(\"Enter Temperature in Fahrenheit : \"))\n celsius = (fahrenheit - 32) * 5 / 9\n print(\"Temperature: \" + str(fahrenheit) + \" F = \" + str(celsius) + \" C\")\n elif user_choice == 2:\n celsius = float(input(\"Enter Temperature in Celsius : \"))\n fahrenheit = (celsius * 9 / 5) + 32\n print(\"Temperature: \" + str(celsius) + \" C = \" + str(fahrenheit) + \" F\")\n else:\n print(\"Invalid option!\")\n\n print(\"\")", "def fahrenheit(T_in_celsius):\n return (T_in_celsius * 9 / 5) + 32", "def celsius_conv(self, f):\n if f == 0:\n return -17.7778\n else:\n return (f - 32.0) * (5.0 / 9.0)", "def c_to_f(celsius):\n fahrenheit = round((celsius * 1.8) + 32, 2)\n return fahrenheit", "def convert_f_to_c(temp_in_farenheit):\n cel = round((((temp_in_farenheit - 32) * 5) / 9),1)\n return cel", "def f2c_cml_function():\n import sys\n\n F = float(sys.argv[1])\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))", "def convert_to_celsius(self):\n try:\n self.root.ids.fahrenheit_input.hint_text = 'Enter amount in Fahrenheit'\n self.root.ids.celsius_input.text = '{:.2f}'.format((float(self.root.ids.fahrenheit_input.text) - 32)\n * 5 / 9)\n except ValueError:\n self.root.ids.fahrenheit_input.text = ''\n self.root.ids.fahrenheit_input.hint_text = 'Invalid number'", "def tempConvert(temp, unit):\n if unit == 'F':\n celsius = (temp - 32) * 5 / 9\n return celsius\n else:\n return temp", "def to_fahrenheit(celsius):\n\n return (1.8*celsius) + 32", "def cels_to_fahr():\n while True:\n celsius = input(\"Podaj temperaturę w stopniach Celsjusza: \")\n try:\n int(celsius)\n break\n except ValueError:\n try:\n float(celsius)\n break\n except ValueError:\n print(\"Nieprawidłowe dane, podaj temperaturę jako wartość liczbową.\")\n print('''Wzór na przeliczanie stopni Celsjusza na stopnie Fahrenheita:\n [\\u00b0F] = [\\u00b0C] * 9/5 + 32''')\n print(\"Podana temperatura przeliczona na stopnie Fahnrenheita: \", end=\"\")\n print(float(celsius) * 9 / 5 + 32)", "def cels_fahr(cels):\n temp = cels * 9.0 / 5 + 32\n return temp", "def fahr_to_celcius(temp_fahr):\n temp_celcius = (temp_fahr - 32) * 5/9\n return temp_celcius", "def celcius_to_fahrenheit(celcius_float):\n return celcius_float * 1.8 + 32", "def celsius_to_fahr(temp):\n return temp * (9/5) + 32", "def fahrenheit(celsius):\n return ((celsius/5)*9)+32", "def fahrenheitToCelcius(fahrenheit:float, ndigits = 2)->float:\n return round((float(fahrenheit) - 32) * 5 / 9, ndigits)", "async def c(self, f : float):\n c = (f-32) * 5/9\n await self.bot.say(\"{0} Celsius\".format(c))", "def GetFahrenheit(self):\n return self.GetCelcius()*1.8+32", "def convertTemp(t, convertTo=\"C\"):\n # check if target temperature is celcius (metric)\n if convertTo == \"C\":\n # returns celcius (metric) temperature\n return round(((5 / 9) * (t - 32)), 1)\n else:\n # returns fahrenheit but rounded\n return round(t, 1)", "def celciusToFahrenheit(celcius: float, ndigits: int = 2)->float:\n return round((float(celcius) *9 / 5) + 32 , ndigits)", "def target_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_c)", "def testCtoF(self):\r\n for integer, numeral in self.ctofvalues:\r\n result = conversions_refactored.convert('Celsius', 'Fahrenheit', integer) \r\n 
self.assertEqual(numeral, result, msg='Incorrect result, calculation error')", "def temperature() -> float:", "def testFtoC(self):\r\n for integer, numeral in self.ftocvalues:\r\n result = conversions_refactored.convert('Fahrenheit', 'Celsius', integer) \r\n self.assertEqual(numeral, result, msg='Incorrect result, calculation error')", "def temperature(self):\n self.convert_window(\"Temperature\", \"Celsius\", [\"Celsius\", \"Fahrenheit\", \"Kelvin\", \"Rankine\", \"Reaumur\", \"Newton\", \"Romer\", \"Delisle\"])", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def convert_temperature(self, event):\n try:\n #Compare other unit to one unit(celsius) then compare that unit to celsius\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Celsius\": current_value * 1.0, \"Fahrenheit\": (current_value - 32) / 1.8, \"Kelvin\": current_value - 273.15, \"Reaumur\": current_value / 0.8, \"Rankine\": (current_value - 491.67) / 1.8, \"Newton\": current_value / 0.33, \"Romer\": (current_value - 7.5) / 0.525, \"Delisle\": 100 - current_value * 0.66666667}\n new_value={\"Celsius\": unit_comp[current_unit], \"Fahrenheit\": unit_comp[current_unit] * 1.8 + 32, \"Kelvin\": unit_comp[current_unit] + 273.15, \"Reaumur\": unit_comp[current_unit] * 0.8, \"Rankine\": unit_comp[current_unit] * 1.8 + 491.67, \"Newton\": unit_comp[current_unit] * 0.33, \"Romer\": unit_comp[current_unit] * 0.525 + 7.5, \"Delisle\": (100 - unit_comp[current_unit]) * 1.5}\n printer = \"Value is invalid.\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(new_value[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def fahr_to_celsius(fahr):\n result_in_celsius = (fahr - 32) + 5/9\n return result_in_celsius", "def toCelcius (x):\r\n\r\n\tc = x-32\r\n\tc = 5*c/9\r\n\treturn c", "def translate_from_farenheit_to_celsius(farenheit: float) -> float:\n return (farenheit - 32) * 5./9.", "def temperature(self) -> float:\n # Start a measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp", "def convertFarenheitToCelsius(F):\n if isinstance(F, str) == True:\n raise ValueError(\"Farenheit cannot be a string value\")\n if isinstance(F,complex) == True:\n raise ValueError(\"Farenheit cannot be a complex value\")\n if isinstance(F,int) == True:\n raise ValueError(\"Farenheit should be a float value, example: 120.50\")\n \n C = (F-32)/1.8\n return C", "def celsius_to_fahr(degrees_celsius: float) -> float:\n return (degrees_celsius * 9.0 / 5.0) + 32.0", "def convertTemperature():\n global runtime\n global chan\n global delay\n thread = threading.Timer(delay, convertTemperature)\n thread.daemon = True\n thread.start()\n\n #Get temperature in readable values\n ADC_raw = chan.value\n ADC_voltage = chan.voltage\n temperature 
= ((ADC_raw*3.3)/float(1023))\n temperature = round(temperature,2)\n \n # Run time after operation\n end_time = time.time()\n runtime = round(end_time-start_time, 0)\n line = (f'{runtime}s',ADC_raw, f'{temperature} {chr(176)}C')\n print(\"{0: <20} {1: <20} {2: <20}\".format(*line))", "def convertCelsiusToFahrenheit(degrees):\n degrees = str(degrees)\n convert = (decimal.Decimal(degrees) / decimal.Decimal('5') * 9) + 32\n return float(convert)", "def kelvin_to_celsius(temp):\n return temp - 273.15", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()", "def f2c_cml_exc_function():\n import sys\n\n try:\n F = float(sys.argv[1])\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))\n except:\n print(\"Format should be {} with a temperature in Farenheit\" \\\n .format(sys.argv[0]))", "def target_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_c\"))\r\n return kelvin_to_celsius(self._target_temperature)", "def temperature():\n from .imperial import deg_F as F\n from .imperial import deg_R as R\n\n K = si.K\n C = si.deg_C\n\n return Equivalency(\n [\n (K, C, lambda x: x - 273.15, lambda x: x + 273.15),\n (C, F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8),\n (K, F, lambda x: x * 1.8 - 459.67, lambda x: (x + 459.67) / 1.8),\n (R, F, lambda x: x - 459.67, lambda x: x + 459.67),\n (R, C, lambda x: (x - 491.67) * (5 / 9), lambda x: x * 1.8 + 491.67),\n (R, K, lambda x: x * (5 / 9), lambda x: x * 1.8),\n ],\n \"temperature\",\n )", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def CtoF (celsius):\n f=(1.8)*celsius+32\n return int(round(f))", "def get_compensated_temperature() -> float:\n comp_factor = 2.25\n cpu_temp = get_cpu_temperature()\n raw_temp = bme280.get_temperature()\n comp_temp = raw_temp - ((cpu_temp - raw_temp) / comp_factor)\n # print(\"\"\"\n # Compensated_Temperature: {:05.2f} *C\n # Pressure: {:05.2f} hPa\n # Relative humidity: {:05.2f} %\n # \"\"\".format(temperature, pressure, humidity))\n return comp_temp", "def _calculate_temperature(c, h):\n\n return (c - 331.4 - 0.0124 * h) / 0.6", "def get_temperature(data):\n celcius = 0\n 
celcius = [i for i in data if re.search(r'\\d+[/]', i)]\n \n if celcius == []:\n return \"N/A\"\n celcius = celcius[0].split('/')[0]\n celcius = celcius.replace('M', '-')\n \n try:\n celcius = int(celcius)\n except ValueError:\n return \"N/A\"\n\n farenheit = round((celcius * 9/5) + 32) # formula to get farenheit from celcius\n temperature = \"{0} C ({1} F)\".format(celcius, farenheit)\n return temperature", "def fahrenheit(self):\n return (self.celsius * 9 / 5) + 32", "def temperature_f(self, tuple_data, status):\r\n fahr_search = Temperature.fahr.search(status)\r\n temperature = None\r\n try:\r\n if fahr_search != None:\r\n temperature = fahr_search.group(2).replace(\",\", \".\")\r\n temperature = float(temperature)\r\n else:\r\n celcius_search = Temperature.celcius.search(status)\r\n if celcius_search != None:\r\n temperature = celcius_search.group(2).replace(\",\", \".\")\r\n temperature = float(temperature)\r\n temperature = ((9.0/5) * temperature) + 32\r\n except ValueError:\r\n print \"Encoding error on '%s'\" % (status)\r\n return temperature", "def kelvin_to_fahr(temp):\n temp_c = kelvin_to_celsius(temp)\n result = celsius_to_fahr(temp_c)\n return result", "def eco_temperature_high_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_f\"))\r\n return celsius_to_fahrenheit(self.eco_temperature_high_c)", "def eco_temperature_low_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_f\"))\r\n return celsius_to_fahrenheit(self.eco_temperature_low_c)", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def testFtoCValues(self):\r\n for c,f in self.knownConversionsCtoF:\r\n result = conversions.convertFahrenheittoCelsius(f)\r\n self.assertEqual(c,result)", "def ctof(temp):\n return temp * 9/5 + 32 # functions should be surrounded by 2 blank lines", "def kelvin_to_fahrenheit(kelvin_temp):\n\n\treturn math.floor(9/5 * (kelvin_temp - 273) + 32)", "def f2c_file_read_function():\n with open('data.txt', 'r') as infile:\n data = [i.strip().split() for i in infile] # store data as list\n\n F = float(data[-1][-1]) # last item in data should be value\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))", "def convert(temp_in_c):\n \n return temp_in_c * (9/5) + 32", "def temperature(self):\n temp = ct.c_float()\n self.lib.GetTemperatureF(ct.pointer(temp))\n return temp.value", "def convertCelsiusToFahrenheit(degreesInCelsius):\n return (((degreesInCelsius) * 9/5) + 32)", "def ccdtemp(n=2):\n temp = camera.GetTemperature()\n camera.status.update()\n mesg, f1, f2, f3, f4 = camera.GetTemperatureStatus()\n print \"Sensor Temp=%6.1f, TargetTemp=%6.1f, AmbientTemp=%6.1f, CoolerVolts=%6.2f\" % (f1,f2,f3,f4)\n return temp", "def __call__(self, p_input:float, p_range=None) -> float:\n if self.get_type() == self.C_UNIT_CONV_TEMPERATURE:\n output = self._temperature(p_input)\n else:\n output = self._scalar_conversion(p_input)\n \n return output", "async def f(self, c : float):\n f = c * 9/5 + 32\n await self.bot.say(\"{0} Fahrenheit\".format(f))", "def testCtoFValues(self):\r\n for c,f in self.knownConversionsCtoF:\r\n result = conversions.convertCelsiusToFahrenheit(c)\r\n self.assertEqual(f,result)", "def convert(df,celsius):\r\n converted_temp=(df[celsius]*(9/5))+32\r\n return converted_temp", "def get_external_temp():\n baseurl = \"http://api.openweathermap.org/data/2.5/weather\"\n query = \"?q=salhouse&mode=xml\"\n url = 
baseurl + query\n r = requests.get(url)\n root = ET.fromstring(r.text)\n kelvin = float(root[1].attrib.get('value'))\n celcius = kelvin - 272.15\n return celcius", "def cel_to_fahren(temp_list):\n fahren_list = [round(temp*9/5+32) for temp in temp_list]\n return fahren_list", "def readtemperature(self, cTemp):\r\n\t\tdata = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_STATUS)\r\n\t\twhile (data & 0x01) != 0 :\r\n\t\t\tdata = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_STATUS)\r\n\t\tdata1 = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_DATAH)\r\n\t\tdata2 = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_DATAH)\r\n\t\t\r\n\t\t# Convert the data to 14-bits\r\n\t\tcTemp = (((data1 * 256.0) + data2) / 4.0)\r\n\t\t\r\n\t\tif cTemp < 0x0140 :\r\n\t\t\tcTemp = 0x0140\r\n\t\telif cTemp > 0x12C0 :\r\n\t\t\tcTemp = 0x12C0\r\n\t\telse :\r\n\t\t\tcTemp = cTemp\r\n\t\t\r\n\t\tcTemp = (cTemp / 32.0) - 50.0\r\n\t\tfTemp = cTemp * 1.8 + 32\r\n\t\t\r\n\t\treturn {'c' : cTemp, 'f' : fTemp}", "def temp_converter():\n degreeC = input(\"What degree in C do you want to convert to F? \")\n degreeF = int(degreeC) * 9 / 5 + 32\n print(\"\\nRobbie says:\\n\")\n print(\"I converted %s C in to %s F!\" % (degreeC, degreeF))", "def temperature(self):\n return float(self._current_observation['temp_c'])", "def eco_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_high)", "def read_object_temperatureF(self, ):\n return self.read_object_temperatureC() * (9.0/5.0) + 32.0", "def eco_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_low)", "def get_temp(val):\n if val in ['', 32767]:\n return None\n return temperature(val / 100., 'C').value('F')", "def FtoC(F):\n c = ((F-32)/9.)*5\n return c", "def conversion(temp, mode):\n if mode == 1:\n c_to_f = (temp * 9/5) + 32\n return c_to_f\n else:\n f_to_c = (temp - 32) * 5 / 9\n return f_to_c", "def test_temperature(self):\r\n self.assertEqual(Converter.TemperatureCtoF(50), 122)\r\n self.assertEqual(Converter.TemperatureCtoF(-50), -58)\r\n self.assertEqual(Converter.TemperatureFtoC(50), 10)\r\n self.assertAlmostEqual(Converter.TemperatureFtoC(-50), -45.55, places=0)", "def convert_temp(self, temperature):\n return 1.8 * (temperature - 273) + 32", "def read_temperature(self):\n self._force_read(False)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n\n return float(self._compensate_temperature(tempADC))", "def target_temperature_high_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperatue_high_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_high_c)", "def c2f(t):\r\n return round(9*t/5 + 32)", "def temp(update: Update, context: CallbackContext) -> None:\n t1 = __sauna.control.getPortValue(\"Temperature Sensor\")\n t2 = float(\"{:.1f}\".format(t1))\n update.message.reply_text(\"Current Temp \" + str(t2) + \" Grad\")", "def convertC(TF):\r\n TC = 5.0/9.0*(TF - 32.0)\r\n return TC", "def ambient_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_f\"))\r\n return 
celsius_to_fahrenheit(self.ambient_temperature_c)" ]
[ "0.80117464", "0.7945186", "0.7945186", "0.7777677", "0.7635342", "0.7612416", "0.75738084", "0.75246674", "0.7467973", "0.7394001", "0.73162985", "0.7294444", "0.7292795", "0.72802883", "0.72540873", "0.7165619", "0.7144465", "0.709325", "0.7084599", "0.7056874", "0.7048489", "0.70000094", "0.6968172", "0.6956087", "0.69494826", "0.6946906", "0.6917419", "0.6868868", "0.6860845", "0.6836721", "0.6832608", "0.6823912", "0.68129945", "0.679861", "0.67713416", "0.67272884", "0.6650758", "0.6620337", "0.6601068", "0.6592547", "0.6574057", "0.6566602", "0.6546826", "0.65462184", "0.6481317", "0.64440787", "0.64420193", "0.6431392", "0.6418682", "0.6397396", "0.63912845", "0.63718534", "0.6317244", "0.6306432", "0.6305922", "0.62892574", "0.6286168", "0.627977", "0.6278265", "0.6268586", "0.6215568", "0.62055695", "0.61877537", "0.6187134", "0.61802983", "0.6172678", "0.61638945", "0.6157174", "0.6151498", "0.61477214", "0.61257213", "0.6117945", "0.60752463", "0.60728174", "0.60638905", "0.6051484", "0.6044073", "0.60298085", "0.60094416", "0.5977423", "0.5948963", "0.5940147", "0.59308106", "0.59302324", "0.5928329", "0.5916675", "0.5915821", "0.59058124", "0.5901984", "0.5899298", "0.5866646", "0.58664614", "0.58637583", "0.5842106", "0.5839472", "0.582754", "0.582432", "0.58199185", "0.58117336", "0.58013296" ]
0.74533796
9
Take an argument as input from the command line
def f2c_cml_function():
    import sys

    F = float(sys.argv[1])
    C = 5/9.0*F - 32
    print("The temperatire in Celcius is {:g}".format(C))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cli(args): # noqa; pylint: disable=unused-argument", "def main(args=None):", "def main(args=None):", "def main(args):", "def main(args):", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input\", help=\"Fasta rDNA input\")\n parser.add_argument(\"output\", help=\"GFF annotation\")\n parser.add_argument(\"kingdom\", help=\"Choose kingdom\")\n args = parser.parse_args()\n command(args)", "def main(args):\n cli = CLI()\n # Check arguments\n cli.parse_arguments(args)", "def commandline():\n command_parser = argparse.ArgumentParser(description=__doc__, epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)\n command_parser.add_argument('-i','--input_file', type=str, required=True, help='input file.')\n command_parser.add_argument('-o','--output_file', type=str, required=True, help='output file.')\n args = command_parser.parse_args()\n return args", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv[1:]", "def parse_arguments(args):", "def get_input_file():\n if len(sys.argv) < 3:\n return -1\n return sys.argv[2]", "def cli(**_) -> None:\n pass", "def cmd_appe(args):", "def main():\r\n args = sys.argv\r\n print 'Script:', args[0]\r\n args.pop(0)\r\n for i, argument in enumerate(sys.argv):\r\n print 'Argument {}: {}'.format(i, argument)\r\n print 'Type: {}'.format(type(argument))", "def main(args=None):\n pass", "def usage():\n\n print(\"\\nHere is how you can use this script\\n\")\n print(\"Usage: python %s\"%sys.argv[0])\n print(\"\\t --input=<file>\")", "def main():\n args = parse_args()\n process_args(args)", "def handle_cmdline_args():\n\n parser = argparse.ArgumentParser(\n description='Generate synthetic data from a specification in a json '\n 'file using the \"synth-method\" described in the json file. ')\n\n parser.add_argument(\n '-i', dest='infile', required=True,\n help='The input json file. 
Must contain a \"synth-method\" property')\n\n parser.add_argument(\n '-o', dest='outfile_prefix', required=True, help='The prefix of the output paths (data json and csv), relative to the QUIPP-pipeline root directory')\n\n args = parser.parse_args()\n return args", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv", "def getArg(flag):\n try:\n a = sys.argv[sys.argv.index(flag) + 1]\n except:\n return \"\"\n else:\n return a", "def parse_command_line():\n parser = argparse.ArgumentParser(description='Parses ID\\'s from the DDI compendium search results, and then downloads the html and puts them into a sqlite database.')\n parser.add_argument('-f', '--file', dest='file',\n action='store',\n help='Filenname to be read')\n arg_manager = parser.parse_args()\n return arg_manager", "def get_input():\n parser = argparse.ArgumentParser(description='Parameters')\n parser.add_argument('--host', help='adress of the host')\n parser.add_argument('--port', help='port of IPMI host')\n parser.add_argument('--user', help='user allowed to acces IPMI')\n parser.add_argument('--passwd', help='password for the specific user')\n parser.add_argument('--interval', help='seconds between each data reading')\n parser.add_argument('--nread', help='number of time to collect data')\n parser.add_argument('--store', action='store_true',\n help='save the data collected in a nosql db')\n args = parser.parse_args()\n return args, parser", "def cmd_user(args):", "def getopts():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", type=argparse.FileType('r'),\n required=True, help=\"input file (.csv)\")\n return parser.parse_args()", "def main(argv):\n parsed = parse_args(argv)\n instream = sys.stdin\n name = parsed.name\n if parsed.input_file != \"-\":\n instream = open(parsed.input_file, 'r')\n name = parsed.input_file.split('.')[1]\n print pfm_as_meme_str(parse_scer_pfm(instream, handle_passed=True), name)", "def command_line():\n version = ' '.join([__version__, __build__])\n parser = ArgumentParser(\n prog='moniker',\n description='Simple batch file renaming tool.',\n )\n parser.add_argument(\n '-v', '--version', action='version',\n version=\"%s v%s\" % (basename(sys.argv[0]), version)\n )\n parser.add_argument(\n '--depth',\n type=int,\n default=0,\n metavar='depth',\n help='Tiers of file heiarcy explored',\n )\n parser.add_argument(\n '--replace',\n nargs=2,\n default=('', ''),\n metavar='replace',\n help='glob pattern to match'\n )\n parser.add_argument(\n 'directory',\n default='.',\n help='target directory root',\n )\n return parser", "def get_args():\n if len(sys.argv) == 3:\n return sys.argv[1:]\n print(\"USAGE: python3 extract_cds.py infile outfile\\n\\n\")\n exit()", "def parse_user_input():\n DISC = 'Generate dataset from input files to one csv frame.'\n parser = argparse.ArgumentParser(description=DISC)\n\n # USER ARGS\n parser.add_argument('-raw_dir',\n type=str,\n help='Path to the dir of raw data.',\n required=True\n )\n\n parser.add_argument('-csv_file',\n type=str,\n help='CSV file of the utterances to transform.',\n required=True\n )\n\n parser.add_argument('-feature_dir',\n type=str,\n help='Path to the dir of output feature representations.',\n required=True\n )\n\n parser.add_argument('-feature_type',\n type=str,\n help='Feature representation of the speech signal.',\n required=True\n )\n\n return parser.parse_args()", "def main():\n\tparser = setup_argument_parser()\n\targuments = parser.parse_args()\n\tto_print = arguments.to_print\n\techo(to_print)", "def 
main(argv: Sequence[Text]) -> None:\n\n\n print(\"TODO\")", "def test_atleast_two_arguments_needed_one():\n cli_result = subprocess.run(\n ['kaiba', 'config.js'],\n capture_output=True,\n )\n assert b'the following arguments are required: input' in cli_result.stderr\n assert cli_result.returncode == 2", "def test_dash_arg():\n parser = CmdParser([dashtest])\n out = parser.parse(\"dashtest -name foo\")\n assert out[0].arguments[0].present == True\n assert out[0].arguments[0].value == \"foo\"\n assert out[0].as_shell_string() == \"dashtest -name foo\"", "def _get_input_from_argv():\n payload_index = sys.argv.index('--') + 1\n params = sys.argv[payload_index:]\n if not params:\n raise ValueError(\n \"A JSON payload was expected after the -- delimiter, but none \"\n \"was found.\")\n return ' '.join(params)", "def input_args():\n return filter(lambda x: len(x) > 0,\n map(lambda x: x.strip(), sys.argv[1:]))", "def parse_argument(argument_option):\n index = sys.argv.index(argument_option)\n try:\n argument = sys.argv[index+1]\n except IndexError:\n print('ERROR: Invalid argument!')\n print(__doc__)\n print(unittest.main.__doc__)\n else:\n sys.argv.pop(index)\n sys.argv.pop(index)\n return argument", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('_', nargs='*')\n args = parser.parse_args()\n\n print(\"Arguments: \" + str(args._))\n\n return 0", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # All reference encoders\n parser.add_argument(\"--step\", dest=\"step\", default=\"10\", type=int, help=\"step size\")\n parser.add_argument(\"--repeats\", dest=\"repeats\", type=int, default=1, help=\"repeats\")\n\n parser.add_argument(dest=\"image\", default=None,\n help=\"select the test image to run\")\n\n args = parser.parse_args()\n return args", "def readArgs():\n args = sys.argv\n if len(args) != 3:\n print(\"ERROR - Wrong number of arguments! \\n\")\n print(\"Usage: plotGantt.py TYPE path/to/result/file.gantt \\n where TYPE is : MTS / SCH\")\n exit(5)\n if args[1] != \"MTS\" and args[1] != \"SCH\":\n print(\"ERROR - Wrong type specified! 
: \" + args[1])\n print(\"Usage: plotGantt.py TYPE path/to/result/file.gantt \\n where TYPE is : MTS / SCH\")\n return args", "def cmd_stru(args):", "def main(self):\n cmd = \"self.%s(sys.stdin)\" % sys.argv[1]\n exec(cmd)", "def parse_arg():\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', type=str, default='stylegan', choices=['vanilla', 'stylegan'])\n parser.add_argument('--mode', type=str, default='sample', choices=['sample', 'project', 'draw', 'interpolate'])\n parser.add_argument('--latent', type=str, default='z', choices=['z', 'w', 'w+'])\n parser.add_argument('--n_iters', type=int, default=1000, help=\"number of optimization steps in the image projection\")\n parser.add_argument('--perc_wgt', type=float, default=0., help=\"perc loss lambda\")\n parser.add_argument('--input', type=str, default='data/cat/*.png', help=\"path to the input image\")\n return parser.parse_args()", "def getArgs():\r\n parser = argparse.ArgumentParser(\r\n description = \"\"\"This program uses the validation data and a given model to do brain segmentation that will be sent to FeTs challenge to get evaluated \"\"\")\r\n parser.add_argument(\"-d\", type = str, help = \"d is the path to validation dataset, e.g: C:/Documents/MICCAI_FeTS2021_TrainingData/\")\r\n parser.add_argument(\"-m\", type = str, help = \"m is the path for the model to load, e.g: C:/Documents/MICCAI_FeTS2021_TrainingData/cpt/cpt_0_1\")\r\n parser.add_argument(\"-o\", type = str, help = \"o is the output path, e.g: C:/Documents/inferences\")\r\n # Get your arguments\r\n return parser.parse_args()", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"text\",\n help=\"text to convert (default: read from stdin)\",\n nargs=\"?\",\n default=sys.stdin,\n )\n parser.add_argument(\n \"-u\", \"--uppercase\", help=\"convert to uppercase\", action=\"store_true\"\n )\n parser.add_argument(\n \"-s\", \"--spaces\", help=\"add spaces between characters\", action=\"store_true\"\n )\n args = parser.parse_args()\n\n text = read_input(args, parser)\n processed_text = process_text(text, args)\n print(processed_text)", "def main():\n try:\n string = sys.argv[1]\n substring = sys.argv[2]\n\n except IndexError:\n string = None\n substring = None\n\n try:\n sys.argv[3]\n\n except IndexError:\n pass\n\n else:\n print(\" More than expected Number of Arguments\")\n string = None\n substring = None\n\n RabinKarp(string, substring)", "def main_parse_args():\n parser = ArgumentParser()\n parser.add_argument('infile', help='path to the file to be mapped.It should\\\n contain one identifer on each line.')\n parser.add_argument('-rh', '--redis_host', default=DEFAULT_REDIS_URL,\n help='url of Redis db')\n parser.add_argument('-rp', '--redis_port', default=DEFAULT_REDIS_PORT,\n help='port for Redis db')\n parser.add_argument('-rps', '--redis_pass', default=DEFAULT_REDIS_PASS,\n help='password for Redis db')\n parser.add_argument('-of', '--outfile', default=None,\n help='path to the output file')\n parser.add_argument('-sh', '--source_hint', help='suggestion for ID source \\\n database used to resolve ambiguities in mapping',\n default=DEFAULT_HINT)\n parser.add_argument('-t', '--taxon', help='taxon id of species of all gene \\\n names', default=DEFAULT_TAXON)\n myargs = parser.parse_args()\n return myargs", "def cli() -> object:\n parser = argparse.ArgumentParser(description=\"Expression Compiler\")\n parser.add_argument(\"sourcefile\", type=argparse.FileType('r'),\n help=\"Source program text\")\n parser.add_argument(\"outfile\", 
type=argparse.FileType('w'),\n nargs=\"?\", default=sys.stdout,\n help=\"Output file for assembly code\")\n args = parser.parse_args()\n return args", "def parse_cmdline():\n\tparser = ArgumentParser(prog=\"FastP_QC.py\", description=\"\"\"Script collects stats from fastp jsons.\"\"\")\n\tparser.add_argument(\"-r1\", \"--r1_stats\", dest=\"r1_stats\", action=\"store\", required=True, help=\"Text file with r1 stats, from q30.py script.\")\n\tparser.add_argument(\"-r2\", \"--r2_stats\", dest=\"r2_stats\", action=\"store\", required=True, help=\"Text file with r2 stats, from q30.py script.\")\n\tparser.add_argument(\"-n\", \"--name\", dest=\"name\", action=\"store\", required=True, help=\"Sample name\")\n\targs = parser.parse_args()\n\treturn args", "def command_line_arguments():\n\n try:\n parser = argparse.ArgumentParser(description='Log Handler/Cleaner/Copier for Idemia DocAuth')\n\n # Add required arguments.\n parser.add_argument('action', choices=['clean', 'download'], type=str, help='clean or download')\n\n # Parse the arguments\n args = parser.parse_args()\n\n return args\n\n except Exception as err:\n print(err)\n return", "def _parse_args():\n parser = argparse.ArgumentParser(description='Run DAFI.')\n parser.add_argument('input_file', help='Name (path) of input file')\n return parser.parse_args()", "def cli() -> None:", "def cli() -> None:", "def argument_parser():\n parser = argparse.ArgumentParser(\n description=\"Validates that the HRC files are correct in a Potree Octtree\")\n parser.add_argument('-i','--input',default='',help='Input folder with the Potree Octtree',type=str, required=True)\n return parser", "def hotfix_deepobs_argparse():\n sys.argv = sys.argv[:1]", "def cli():\n parser = argparse.ArgumentParser()\n # add arguments\n parser.add_argument('image_path', type = str, default = './flowers/test/1/image_06743.jpg', help ='Directory of Image of testing')\n parser.add_argument('checkpoint', type = str, default = 'checkpoint.pth', help ='Directory to save checkpoints')\n parser.add_argument('--top_k', action = 'store', dest = 'top_k', type = int, default = 5)\n parser.add_argument('--category_names', action='store', dest='category_names', type=str, default='cat_to_name.json')\n parser.add_argument('--gpu', action = 'store', default = False, help = 'GPU mode')\n return parser.parse_args()", "def get_args():\n parser = argparse.ArgumentParser(description='\"Some Input argu for Generate Fragsize Distribution.')\n parser.add_argument('-i', '--input', required=True, action='store',\n help=\"Input File of .molucule.table\")\n parser.add_argument('-o', '--output', required=True, action='store',\n help=\"Output File of .fragsize_distribution.txt\")\n \n return parser.parse_args()", "def get_commandline_argument(argument: str, argument_list: list) -> str:\n length = len(argument_list)\n if length == 0:\n print('get_commandline_argument(): Error, empty argument_list passed, exiting.')\n exit(1)\n\n if str(argument) == '':\n # No argument passed.\n return ''\n\n if length == 1:\n # The argument list contains the script name only.\n return ''\n\n for i in range(1, length - 1):\n if str(argument_list[i]) == str(argument):\n if i + 1 <= length:\n # Only get the next index if we are still in the array bounds.\n return str(argument_list[i + 1])\n return ''", "def handle_input():\n\n command = None\n\n while command != \"quit\":\n input_string = raw_input(\"HBA Database> \")\n tokens = input_string.split('|')\n command = tokens[0]\n args = tokens[1:]\n\n if command == \"student\":\n github 
= args[0] #works\n get_student_by_github(github)\n\n elif command == \"new_student\":\n first_name, last_name, github = args[:3] # unpack!\n make_new_student(first_name, last_name, github)\n\n elif command == \"get_project_by_title\":\n title = args[0] #works\n get_project_by_title(title)\n\n elif command == \"get_grade_by_github_title\":\n github, title = args[:2]\n get_grade_by_github_title(github, title)\n\n elif command == \"assign_grade\":\n github, title, grade = args[:3]\n assign_grade(github, title, grade)\n\n elif command == \"add_project\":\n title, description, max_grade = args[:3]\n add_project(title, description, max_grade)\n\n elif command == \"get_grade_by_student\":\n first_name = args[0] #doesn't work\n get_grade_by_student(first_name)", "def segment_from_command_line(args):\n\n input_file = BedTool(args.input)\n # Segment the input file\n return segment(input_file, args.method, p0=args.p0, prior=args.prior)", "def specify_parser():\n parser = ArgumentParser()\n\n parser.add_argument('input', nargs='?', type=FileType('r'), default=sys.stdin)\n parser.add_argument('--datafile', dest=\"datafile\", required=True, nargs=1, type=FileType('r'))\n return parser", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"print random line.\", epilog=\"Epilog of program.\", add_help=True\n )\n\n # optional input\n parser.add_argument(\n \"filename\",\n nargs=\"?\",\n help=\"filename to print random line from\",\n type=argparse.FileType(\"r\"),\n default=sys.stdin,\n )\n\n return parser.parse_args()", "def __main__():\n parser = argparse.ArgumentParser(description='Facebook ID Finder')\n parser.add_argument('--username', '-u', dest='username', help='Username to search for')\n parser.add_argument('--version', '-v', action='version', version='%(prog)s 0.1')\n args = parser.parse_args()\n ausername = args.username\n\n if not args.username:\n sys.exit(parser.print_help())\n else:\n search_username(ausername)", "def main():\n parser = argparse.ArgumentParser(description=\"\"\"Tester for YT Data API and different inputs\"\"\")\n parser.add_argument('-a', '--analytics', help='Performs a basic analytics lookup for the user\\'s channel entered')\n parser.add_argument('-c', '--comments', help='Performs a lookup of comments for the video id entered')\n args = parser.parse_args()\n\n if args.analytics:\n analytics = args.analytics\n analyt(analytics)\n\n if args.comments:\n comments = args.comments\n get_comments(comments)", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def main():\r\n\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(dest='INFILE')\r\n parser.add_argument(dest='OUTFILE')\r\n\r\n args = parser.parse_args()\r\n function1(args.INFILE, args.OUTFILE)", "def cmdline_main():\r\n import sys\r\n if (len(sys.argv) < 2 or len(sys.argv) > 4 or \"--help\" in sys.argv or\r\n \"-h\" in sys.argv or sys.argv[1] not in (\"-c\", \"-d\")):\r\n print(\"Usage: python -m snappy <-c/-d> [src [dst]]\")\r\n print(\" -c compress\")\r\n print(\" -d decompress\")\r\n print(\"output is stdout if dst is omitted or '-'\")\r\n print(\"input is stdin if src and dst are omitted or src is '-'.\")\r\n sys.exit(1)\r\n\r\n if len(sys.argv) >= 4 and 
sys.argv[3] != \"-\":\r\n dst = open(sys.argv[3], \"wb\")\r\n elif hasattr(sys.stdout, 'buffer'):\r\n dst = sys.stdout.buffer\r\n else:\r\n dst = sys.stdout\r\n\r\n if len(sys.argv) >= 3 and sys.argv[2] != \"-\":\r\n src = open(sys.argv[2], \"rb\")\r\n elif hasattr(sys.stdin, \"buffer\"):\r\n src = sys.stdin.buffer\r\n else:\r\n src = sys.stdin\r\n\r\n if sys.argv[1] == \"-c\":\r\n method = stream_compress\r\n else:\r\n method = stream_decompress\r\n\r\n method(src, dst)", "def call_prog(args):\n # Just dump the entirety of the command so that\n # the user can specify whatever arguments they want\n call(args)" ]
[ "0.68680656", "0.67012274", "0.67012274", "0.66923463", "0.66923463", "0.66071945", "0.64984864", "0.6440137", "0.63924545", "0.6384571", "0.6353774", "0.63000405", "0.6298342", "0.62937367", "0.6274231", "0.6258619", "0.6252007", "0.62427706", "0.621087", "0.61711746", "0.6156566", "0.6146984", "0.61303604", "0.61265117", "0.6122675", "0.6121437", "0.61117035", "0.6109215", "0.61077636", "0.60983336", "0.6073003", "0.6067238", "0.60483104", "0.60306627", "0.60261804", "0.6015566", "0.6013403", "0.6013403", "0.6013403", "0.6013403", "0.6013403", "0.6013403", "0.6013403", "0.6013403", "0.6007978", "0.59921324", "0.59920096", "0.5983493", "0.5970742", "0.59643275", "0.5955502", "0.59530073", "0.5940436", "0.5934396", "0.5932683", "0.59171677", "0.5890722", "0.5877369", "0.5877369", "0.58761305", "0.5874561", "0.58724195", "0.5867481", "0.5858614", "0.58585525", "0.58540946", "0.58470535", "0.584642", "0.5846215", "0.58458036", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.5841326", "0.58406276", "0.5824756" ]
0.0
-1
Read temp from a file
def f2c_file_read_function():
    with open('data.txt', 'r') as infile:
        data = [i.strip().split() for i in infile]  # store data as list
    F = float(data[-1][-1])  # last item in data should be value
    C = 5/9.0*(F - 32)
    print("The temperature in Celsius is {:g}".format(C))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_file(self, *args):\n with open(os.path.join(self.temp_path, *args)) as fp:\n return fp.read()", "def read_file(self, file_name):\n f = file(file_name, \"r\")\n temp = f.read()\n f.close()", "def GetFileContents(self, filename):\n with tempfile.NamedTemporaryFile() as t:\n self.GetFile(filename, t.name)\n with open(t.name, 'r') as f2:\n res = f2.read()\n logging.debug(\"GetFileContents(%s)->%s\" % (filename, res))\n return res", "def read(self, filename):\r\n with temporary_file() as fp:\r\n os.unlink(fp.name)\r\n if self._call(\"-copyToLocal\", filename, fp.name) == 0:\r\n with open(fp.name) as f:\r\n return f.read()\r\n else:\r\n return None", "def read(path):", "def read_file(self, filepath):\n outputfile = tempfile.SpooledTemporaryFile(\n max_size=dbbackup_settings.TMP_FILE_MAX_SIZE,\n dir=dbbackup_settings.TMP_DIR)\n self.ftp.retrbinary('RETR ' + filepath, outputfile.write)\n return outputfile", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def GetFileContents(self, filename):\n logging.debug(\"GetFileContents(%s)\" % (filename))\n with tempfile.NamedTemporaryFile(mode='w') as t:\n self.GetFile(filename, t.name)\n with open(t.name, 'r', encoding='UTF-8') as f2:\n res = f2.read()\n logging.debug(\"GetFileContents(%s)->%s\" % (filename, res))\n return res", "def read(self):\n if not self._has_tmp_file_path():\n return self._content\n\n with open(self._get_tmp_file_path(), \"r\") as tmp_file:\n file_contents = tmp_file.read()\n return file_contents", "def _read_xid_from_fp_xid_temp_and_delete_file(self, fn_temp):\n\t\tif os.path.isfile(fn_temp): \n\t\t\tif os.stat(fn_temp).st_size > 0:\n\t\t\t\txid = at.Table.read(fn_temp, format='ascii.csv', comment='#')\n\t\t\t\tos.remove(fn_temp)\n\t\t\t\txid = self._xid_pick_only_closest(xid)\n\t\t\telse: \n\t\t\t\tprint(\"[hscObj] no object found - xid file empty\")\n\t\t\t\tos.remove(fn_temp)\n\t\t\t\txid = None\n\t\telse: \n\t\t\tprint(\"[hscObj] query failed\")\n\t\t\txid = None\n\t\treturn xid", "def read_temperature():\n temp = 0.0\n with open(\"daily_temp.txt\", \"r\") as f:\n temp = float(f.readline())\n\n return temp", "def read(fname):\n f = fabio.open(fname)\n data = f.data\n del f; # close file\n return data", "def get_temp(self):\n lines = self._get_temp_raw()\n\n while not self._is_successful_read(lines):\n time.sleep(0.2)\n lines = self._get_temp_raw()\n \n try: \n temp_file_location = lines[1].find('t=')\n except: \n print(\"ERROR: w1_slave file corrupted. 
No t= found.\")\n \n if temp_file_location is not -1:\n temp_string = lines[1][temp_file_location+2:]\n temp = float(temp_string) / 1000.0\n return temp", "def read_from_file(self, filename: str) -> None:", "def _get_temp_raw(self):\n try: \n f = open(self.device_file, 'r')\n lines = f.readlines()\n f.close()\n return lines\n\n except: \n print(\"ERROR: w1_slave file could not be opened (temp sensor)\")", "def read_file(path_to_file):\n 8", "def _read_raw_temperature():\n with open(device_file, 'r') as f:\n content = f.readlines()\n return content", "def read_file(file):\n with open(file, \"r\") as fid:\n return fid.read()", "def __load_temp_file(self):\r\n if not osp.isfile(self.file_path):\r\n # Creating temporary file\r\n default = ['# -*- coding: utf-8 -*-',\r\n '\"\"\"',\r\n self.tr(\"Pydee Editor\"),\r\n '',\r\n self.tr(\"This temporary script file is located here:\"),\r\n self.file_path,\r\n '\"\"\"',\r\n '',\r\n '',\r\n ]\r\n text = \"\\r\\n\".join([unicode(qstr) for qstr in default])\r\n encoding.write(unicode(text), self.file_path, 'utf-8')\r\n self.load(self.file_path)", "def read_file(self):\n self._apply_shared_lock()\n\n self.handle = self._open_file_r()\n out = self._deserialize(self.handle)\n self.handle.close()\n\n self._release_lock()\n\n return out", "def read_whole_file(self, file_handle):\n # reads in a whole file given a file handle\n \n temp_str = \"\"\n for each_line in file_handle.xreadlines():\n temp_str += each_line\n return temp_str", "def _read_file(file_name):\n file_handle = file(file_name)\n try:\n return file_handle.read()\n finally:\n file_handle.close()", "def read_file():\n with open(FILE_NAME) as f:\n data = f.read()\n return data", "def read(self, filename):\n raise NotImplementedError", "def read_temp(filename=None):\n if not filename:\n filename = settings.TEMP_FILENAME\n\n return pd.read_csv(filename, sep=';', parse_dates=[3],\n dtype={0: object, 2: object, 3: object})", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read().strip()", "def _read(fname):\n fpath = os.path.dirname(__file__)\n fpath = os.path.join(fpath, fname)\n with open(fpath, 'r') as file_:\n return file_.read()", "def _ReadFile(filepath):\n with open(filepath) as f:\n return f.read()", "def read_local_file(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()", "def read_file(self, file):\n fd = open(file)\n data = fd.read()\n fd.close()\n return data", "def read(file):\n with open(file, 'r') as file:\n return file.read()", "def read_file(self, path):\n with open(path) as f:\n return self.read_file_obj(f)", "def readFromFile(filename):\n raise NotImplementedError", "def readTempSensor(sensorName) :\n f = open(sensorName, 'r')\n lines = f.readlines()\n f.close()\n return lines", "def myReadFile( path):\n f = open(path,'r')\n result = f.readlines()\n f.close\n return result", "def local_read(filename):\n full_filename = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n filename)\n return codecs.open(full_filename, 'r', 'utf-8').read()", "def read_file(fname):\n with open(fname, 'r') as fopen:\n fdata = fopen.read()\n return fdata", "def _read_file(self) -> str:\n with open(self._file_name) as fp:\n return fp.read()", "def readFastaFile(filename):", "def _read_file(self, filePath):\n with open(filePath) as f:\n fileContent = f.read()\n 
f.close()\n return fileContent.strip()", "def open_and_read_file(file_path):\n\n # your code goes here\n return open(file_path).read()", "def file_read(path: str) -> str:\n if os.path.isfile(path):\n while True:\n try:\n with open(path, \"r\") as fptr:\n return fptr.read()\n except PermissionError:\n pass\n return \"\"", "def read(cls):\n x_i=\"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.read()\n return file", "def readfile(fname, mode='rb'):\n f = open(fname, mode)\n raw = f.read()\n f.close()\n return raw", "def _file_read(fname):\n if not os.path.exists(fname):\n parser.error(\"File '{0}' not found.\".format(fname))\n return open(fname, 'r')", "def read1(cls):\n x_i = \"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.readlines()\n return file", "def read_file(name_file):\n with open(name_file, 'r') as file:\n return file.read()", "def missing_but_potential_file():\r\n tempf = tempfile.NamedTemporaryFile()\r\n fname = tempf.name\r\n tempf.close()\r\n return fname", "def readfile(name, outstream, start=0, end=None):", "def open_and_read_file(file_path):\n\n # your code goes here\n file_ = open(file_path).read()\n\n return file_", "def processTempLog(file_name):", "def open_and_read_file(file_path):\n text_data = open(file_path).read()\n # print text_data\n return text_data", "def read(self, args):\n assert self.exists(args=args)\n file_path = self.path(args)\n file_str = autofile.read_file(file_path)\n file_dat = self.reader_(file_str)\n assert self.checker_(file_dat)\n return file_dat", "def read_file(filename):\n with open(filename) as fp:\n return fp.read()", "def file_read(file_location, mode='r'):\n with open(file_location, mode=mode) as file:\n file_data = file.read()\n return file_data", "def read_file(self, file_path): \n logging.info('Lendo arquivo de {0}'.format(file_path))\n file_with_tags = open(file_path, \"r\", encoding='utf-8')\n return file_with_tags.readlines()", "def read_file(filename):\n return open(filename).read()", "def open_and_read_file(file_path):\n\n file = open(file_path)\n file = file.read()\n\n return file", "def read_local_file(filename):\n import fsspec\n fs = fsspec.filesystem('file')\n\n with fs.open(filename) as f:\n data = loads(f.read())\n\n return data", "def read_file(file_name):\n with open(file_name, \"r\") as f:\n return f.read()", "def read(fn):\n with open(os.path.join(os.path.dirname(__file__), fn), encoding='utf-8') as f:\n return f.read()", "def readFromFile(self, path):\n log(logging.DEBUG, \"Read from file: \" + path)\n with open(path, \"r\") as f:\n return f.read()", "def read(self):\n\t\tself.file.seek(0)\n\t\treturn self.file.read().strip()", "def read_file(file_name):\n with open(file_name, 'r') as f:\n return f.read()", "def _Read(filename):\n with open(filename, 'rb') as f:\n return f.read()", "def readFile(self, fname):\n res = None\n with open(self.PATH + fname, 'rb') as handle:\n res = pickle.load(handle)\n return res", "def ReadFile(path, mode='r'):\n with open(path, mode) as f:\n return f.read()", "def read_file(file_path):\n with open(file_path, 'r') as infile:\n return infile.read()", "def read(filename):\n with open(os.path.join(os.path.dirname(__file__), filename)) as f:\n return f.read()", "def read(path):\n with open(path) as f:\n return f.read()", "def read_file(file_name):\n return open(os.path.join(os.path.dirname(os.path.dirname(__file__)), file_name)).read()", "def read(self, filename): # real signature unknown; restored from __doc__\n pass", "def read(name):\n\n return 
open(name).read()", "def make_temp_file():\n with tempfile.NamedTemporaryFile() as f:\n return f.name", "def read_from_file(path):\n with io.open(path, 'rb') as ios:\n return read(ios)", "def open_and_read_file(file_path):\n contents = open(file_path).read()\n # your code goes here\n\n return contents", "def readFromTextFile(self, file_name):\n with open(file_name, 'r') as file_obj:\n return file_obj.read()", "def readfile(filename):\n with open(filename, encoding=\"utf-8\") as file:\n raw = file.read()\n return raw", "def read_template(file_name):\n infile = open(file_name, 'r')\n return infile.read()", "def test_read():\n f = open('test', mode='r')\n line = f.read()\n f.close()", "def open_and_read_file(file_path):\n\n # your code goes here\n with open(file_path) as open_file:\n open_file = open_file.read()\n return open_file", "def rawinputfile():\n ifile = tempfile.NamedTemporaryFile(suffix='.csv', mode='a', delete=False)\n ifile.write(RAWINPUTFILE_CONTENTS)\n ifile.close()\n # must close and then yield for Windows platform\n yield ifile\n if os.path.isfile(ifile.name):\n try:\n os.remove(ifile.name)\n except OSError:\n pass # sometimes we can't remove a generated temporary file", "def readfile(path, outstream, start=0, end=None):", "def handle_file(filename,operation = 'r'):\n with open(filename,operation) as f:\n data = f.readlines()\n return data", "def _tmpfile(*args, **kwargs):\n with NamedTemporaryFile(prefix='test_parser', suffix='.tmp', delete=False) as tmp:\n fpath = tmp.name\n fh = open(fpath, *args, **kwargs)\n file_handles.append(fh)\n return fh", "def read_from_file(file_name):\n with open(file_name, \"rb\") as text_file:\n return text_file.read()", "def read_file(file):\n f = open(file, 'r')\n print(f.read())", "def simple_text_reader(text_file):\n with open(text_file, 'rt') as file:\n data = file.read()\n return data", "def get_contents( path, name, verbose = False, get_config=lambda: {} ):\n t_file_fh, t_file_name = tempfile.mkstemp()\n os.close(t_file_fh)\n try:\n fs_mod.fs_get( path+\"/\"+name, t_file_name, get_config )\n except:\n if verbose:\n print(\"get_contents exception:\",traceback.format_exc(), file=sys.stderr)\n return \"\"\n contents = open(t_file_name,\"r\").read()\n os.remove(t_file_name)\n return contents", "def open_file(filename):\n print(\"filename: %s\\n\" % filename)\n if filename:\n return open(filename, \"w\")\n else:\n return tempfile.NamedTemporaryFile(delete=False)", "def read_file(filename):\n f = open(filename)\n contents = f.read()\n f.close()\n return contents", "def getFileContents(filename, mode=\"r\", encoding=None):\n\n with withFileLock(\"reading file %s\" % filename):\n with openTextFile(filename, mode, encoding=encoding) as f:\n return f.read()", "def read(cls, path):\n with cls.open(path, 'rt') as fd:\n return fd.read()", "def read (self, file):\n\t\tself.unpack (file.read (self.size()))", "def read():\n # TODO", "def readFile1():\n global data1\n file1 = open(\"t1.txt\",\"r\")\n data1=file1.read()", "def read_file(self, fname, name):\r\n self.filename = name\r\n if fname != \".\":\r\n self.fname = f\"{fname}\\\\\"\r\n self.pathread = os.path.join(self.p, self.fname)\r\n else:\r\n self.pathread = self.p\r\n try:\r\n self.path = os.path.join(self.pathread, self.filename)\r\n with open(self.path, 'r') as read:\r\n self.data = read.readlines()\r\n except Exception as error:\r\n return error\r\n finally:\r\n send = \" \".join(self.data)\r\n return send", "def read_file(filename):\n if os.path.isfile(filename):\n with open(filename, 'r') 
as f:\n return f.read()" ]
[ "0.7447779", "0.7349098", "0.6541486", "0.65207756", "0.6515084", "0.64710087", "0.6465426", "0.6465426", "0.6429184", "0.6283047", "0.62467045", "0.62438434", "0.6242521", "0.62389153", "0.62389153", "0.6197708", "0.617634", "0.6135359", "0.61328983", "0.60876274", "0.60842836", "0.60427576", "0.6012975", "0.6009691", "0.60034204", "0.598985", "0.5978948", "0.5978948", "0.59507954", "0.5946499", "0.5938786", "0.59282184", "0.592501", "0.59132016", "0.5871316", "0.5846786", "0.5841136", "0.58281195", "0.58275026", "0.58272994", "0.5812052", "0.58074814", "0.5804668", "0.5799681", "0.5796659", "0.5782274", "0.5764329", "0.5760583", "0.5759387", "0.5756233", "0.57470167", "0.5745783", "0.5745727", "0.57424235", "0.57324386", "0.5731832", "0.57109785", "0.5710879", "0.57019037", "0.5696183", "0.5677473", "0.5676199", "0.5675119", "0.56749356", "0.5673839", "0.5670095", "0.56698805", "0.566818", "0.5662238", "0.56614196", "0.5660265", "0.5653889", "0.56447875", "0.56209564", "0.56166726", "0.56140155", "0.5610521", "0.5609368", "0.5608419", "0.56074214", "0.5600455", "0.5600028", "0.55772674", "0.55546564", "0.55536485", "0.55512995", "0.5550532", "0.55473775", "0.55452096", "0.5544777", "0.55441755", "0.5539566", "0.55381763", "0.5537471", "0.5537156", "0.55364484", "0.55345875", "0.55282664", "0.55264246", "0.55244076", "0.55213505" ]
0.0
-1
Read temp from a file
def f2c_file_read_write_function():
    with open('Fdeg.dat', 'r') as infile:
        data = [i.strip().split() for i in infile]  # store data as list
    data = data[3:]  # get lines with numerical values only
    F_list = [float(line[-1]) for line in data]
    C_list = [5/9.0*(F - 32) for F in F_list]
    for i in range(len(C_list)):
        print("{:6g}F {:10.2f}C".format(F_list[i], C_list[i]))
    return F_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_file(self, *args):\n with open(os.path.join(self.temp_path, *args)) as fp:\n return fp.read()", "def read_file(self, file_name):\n f = file(file_name, \"r\")\n temp = f.read()\n f.close()", "def GetFileContents(self, filename):\n with tempfile.NamedTemporaryFile() as t:\n self.GetFile(filename, t.name)\n with open(t.name, 'r') as f2:\n res = f2.read()\n logging.debug(\"GetFileContents(%s)->%s\" % (filename, res))\n return res", "def read(self, filename):\r\n with temporary_file() as fp:\r\n os.unlink(fp.name)\r\n if self._call(\"-copyToLocal\", filename, fp.name) == 0:\r\n with open(fp.name) as f:\r\n return f.read()\r\n else:\r\n return None", "def read(path):", "def read_file(self, filepath):\n outputfile = tempfile.SpooledTemporaryFile(\n max_size=dbbackup_settings.TMP_FILE_MAX_SIZE,\n dir=dbbackup_settings.TMP_DIR)\n self.ftp.retrbinary('RETR ' + filepath, outputfile.write)\n return outputfile", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def GetFileContents(self, filename):\n logging.debug(\"GetFileContents(%s)\" % (filename))\n with tempfile.NamedTemporaryFile(mode='w') as t:\n self.GetFile(filename, t.name)\n with open(t.name, 'r', encoding='UTF-8') as f2:\n res = f2.read()\n logging.debug(\"GetFileContents(%s)->%s\" % (filename, res))\n return res", "def read(self):\n if not self._has_tmp_file_path():\n return self._content\n\n with open(self._get_tmp_file_path(), \"r\") as tmp_file:\n file_contents = tmp_file.read()\n return file_contents", "def _read_xid_from_fp_xid_temp_and_delete_file(self, fn_temp):\n\t\tif os.path.isfile(fn_temp): \n\t\t\tif os.stat(fn_temp).st_size > 0:\n\t\t\t\txid = at.Table.read(fn_temp, format='ascii.csv', comment='#')\n\t\t\t\tos.remove(fn_temp)\n\t\t\t\txid = self._xid_pick_only_closest(xid)\n\t\t\telse: \n\t\t\t\tprint(\"[hscObj] no object found - xid file empty\")\n\t\t\t\tos.remove(fn_temp)\n\t\t\t\txid = None\n\t\telse: \n\t\t\tprint(\"[hscObj] query failed\")\n\t\t\txid = None\n\t\treturn xid", "def read_temperature():\n temp = 0.0\n with open(\"daily_temp.txt\", \"r\") as f:\n temp = float(f.readline())\n\n return temp", "def read(fname):\n f = fabio.open(fname)\n data = f.data\n del f; # close file\n return data", "def get_temp(self):\n lines = self._get_temp_raw()\n\n while not self._is_successful_read(lines):\n time.sleep(0.2)\n lines = self._get_temp_raw()\n \n try: \n temp_file_location = lines[1].find('t=')\n except: \n print(\"ERROR: w1_slave file corrupted. 
No t= found.\")\n \n if temp_file_location is not -1:\n temp_string = lines[1][temp_file_location+2:]\n temp = float(temp_string) / 1000.0\n return temp", "def read_from_file(self, filename: str) -> None:", "def _get_temp_raw(self):\n try: \n f = open(self.device_file, 'r')\n lines = f.readlines()\n f.close()\n return lines\n\n except: \n print(\"ERROR: w1_slave file could not be opened (temp sensor)\")", "def read_file(path_to_file):\n 8", "def _read_raw_temperature():\n with open(device_file, 'r') as f:\n content = f.readlines()\n return content", "def read_file(file):\n with open(file, \"r\") as fid:\n return fid.read()", "def __load_temp_file(self):\r\n if not osp.isfile(self.file_path):\r\n # Creating temporary file\r\n default = ['# -*- coding: utf-8 -*-',\r\n '\"\"\"',\r\n self.tr(\"Pydee Editor\"),\r\n '',\r\n self.tr(\"This temporary script file is located here:\"),\r\n self.file_path,\r\n '\"\"\"',\r\n '',\r\n '',\r\n ]\r\n text = \"\\r\\n\".join([unicode(qstr) for qstr in default])\r\n encoding.write(unicode(text), self.file_path, 'utf-8')\r\n self.load(self.file_path)", "def read_file(self):\n self._apply_shared_lock()\n\n self.handle = self._open_file_r()\n out = self._deserialize(self.handle)\n self.handle.close()\n\n self._release_lock()\n\n return out", "def read_whole_file(self, file_handle):\n # reads in a whole file given a file handle\n \n temp_str = \"\"\n for each_line in file_handle.xreadlines():\n temp_str += each_line\n return temp_str", "def _read_file(file_name):\n file_handle = file(file_name)\n try:\n return file_handle.read()\n finally:\n file_handle.close()", "def read_file():\n with open(FILE_NAME) as f:\n data = f.read()\n return data", "def read(self, filename):\n raise NotImplementedError", "def read_temp(filename=None):\n if not filename:\n filename = settings.TEMP_FILENAME\n\n return pd.read_csv(filename, sep=';', parse_dates=[3],\n dtype={0: object, 2: object, 3: object})", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read().strip()", "def _read(fname):\n fpath = os.path.dirname(__file__)\n fpath = os.path.join(fpath, fname)\n with open(fpath, 'r') as file_:\n return file_.read()", "def _ReadFile(filepath):\n with open(filepath) as f:\n return f.read()", "def read_local_file(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()", "def read_file(self, file):\n fd = open(file)\n data = fd.read()\n fd.close()\n return data", "def read(file):\n with open(file, 'r') as file:\n return file.read()", "def read_file(self, path):\n with open(path) as f:\n return self.read_file_obj(f)", "def readFromFile(filename):\n raise NotImplementedError", "def readTempSensor(sensorName) :\n f = open(sensorName, 'r')\n lines = f.readlines()\n f.close()\n return lines", "def myReadFile( path):\n f = open(path,'r')\n result = f.readlines()\n f.close\n return result", "def local_read(filename):\n full_filename = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n filename)\n return codecs.open(full_filename, 'r', 'utf-8').read()", "def read_file(fname):\n with open(fname, 'r') as fopen:\n fdata = fopen.read()\n return fdata", "def _read_file(self) -> str:\n with open(self._file_name) as fp:\n return fp.read()", "def readFastaFile(filename):", "def _read_file(self, filePath):\n with open(filePath) as f:\n fileContent = f.read()\n 
f.close()\n return fileContent.strip()", "def open_and_read_file(file_path):\n\n # your code goes here\n return open(file_path).read()", "def file_read(path: str) -> str:\n if os.path.isfile(path):\n while True:\n try:\n with open(path, \"r\") as fptr:\n return fptr.read()\n except PermissionError:\n pass\n return \"\"", "def read(cls):\n x_i=\"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.read()\n return file", "def readfile(fname, mode='rb'):\n f = open(fname, mode)\n raw = f.read()\n f.close()\n return raw", "def _file_read(fname):\n if not os.path.exists(fname):\n parser.error(\"File '{0}' not found.\".format(fname))\n return open(fname, 'r')", "def read1(cls):\n x_i = \"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.readlines()\n return file", "def read_file(name_file):\n with open(name_file, 'r') as file:\n return file.read()", "def missing_but_potential_file():\r\n tempf = tempfile.NamedTemporaryFile()\r\n fname = tempf.name\r\n tempf.close()\r\n return fname", "def readfile(name, outstream, start=0, end=None):", "def open_and_read_file(file_path):\n\n # your code goes here\n file_ = open(file_path).read()\n\n return file_", "def processTempLog(file_name):", "def open_and_read_file(file_path):\n text_data = open(file_path).read()\n # print text_data\n return text_data", "def read(self, args):\n assert self.exists(args=args)\n file_path = self.path(args)\n file_str = autofile.read_file(file_path)\n file_dat = self.reader_(file_str)\n assert self.checker_(file_dat)\n return file_dat", "def read_file(filename):\n with open(filename) as fp:\n return fp.read()", "def file_read(file_location, mode='r'):\n with open(file_location, mode=mode) as file:\n file_data = file.read()\n return file_data", "def read_file(self, file_path): \n logging.info('Lendo arquivo de {0}'.format(file_path))\n file_with_tags = open(file_path, \"r\", encoding='utf-8')\n return file_with_tags.readlines()", "def read_file(filename):\n return open(filename).read()", "def open_and_read_file(file_path):\n\n file = open(file_path)\n file = file.read()\n\n return file", "def read_local_file(filename):\n import fsspec\n fs = fsspec.filesystem('file')\n\n with fs.open(filename) as f:\n data = loads(f.read())\n\n return data", "def read_file(file_name):\n with open(file_name, \"r\") as f:\n return f.read()", "def read(fn):\n with open(os.path.join(os.path.dirname(__file__), fn), encoding='utf-8') as f:\n return f.read()", "def readFromFile(self, path):\n log(logging.DEBUG, \"Read from file: \" + path)\n with open(path, \"r\") as f:\n return f.read()", "def read(self):\n\t\tself.file.seek(0)\n\t\treturn self.file.read().strip()", "def read_file(file_name):\n with open(file_name, 'r') as f:\n return f.read()", "def _Read(filename):\n with open(filename, 'rb') as f:\n return f.read()", "def readFile(self, fname):\n res = None\n with open(self.PATH + fname, 'rb') as handle:\n res = pickle.load(handle)\n return res", "def ReadFile(path, mode='r'):\n with open(path, mode) as f:\n return f.read()", "def read_file(file_path):\n with open(file_path, 'r') as infile:\n return infile.read()", "def read(filename):\n with open(os.path.join(os.path.dirname(__file__), filename)) as f:\n return f.read()", "def read(path):\n with open(path) as f:\n return f.read()", "def read_file(file_name):\n return open(os.path.join(os.path.dirname(os.path.dirname(__file__)), file_name)).read()", "def read(self, filename): # real signature unknown; restored from __doc__\n pass", "def read(name):\n\n return 
open(name).read()", "def make_temp_file():\n with tempfile.NamedTemporaryFile() as f:\n return f.name", "def read_from_file(path):\n with io.open(path, 'rb') as ios:\n return read(ios)", "def open_and_read_file(file_path):\n contents = open(file_path).read()\n # your code goes here\n\n return contents", "def readFromTextFile(self, file_name):\n with open(file_name, 'r') as file_obj:\n return file_obj.read()", "def readfile(filename):\n with open(filename, encoding=\"utf-8\") as file:\n raw = file.read()\n return raw", "def read_template(file_name):\n infile = open(file_name, 'r')\n return infile.read()", "def test_read():\n f = open('test', mode='r')\n line = f.read()\n f.close()", "def open_and_read_file(file_path):\n\n # your code goes here\n with open(file_path) as open_file:\n open_file = open_file.read()\n return open_file", "def rawinputfile():\n ifile = tempfile.NamedTemporaryFile(suffix='.csv', mode='a', delete=False)\n ifile.write(RAWINPUTFILE_CONTENTS)\n ifile.close()\n # must close and then yield for Windows platform\n yield ifile\n if os.path.isfile(ifile.name):\n try:\n os.remove(ifile.name)\n except OSError:\n pass # sometimes we can't remove a generated temporary file", "def readfile(path, outstream, start=0, end=None):", "def handle_file(filename,operation = 'r'):\n with open(filename,operation) as f:\n data = f.readlines()\n return data", "def _tmpfile(*args, **kwargs):\n with NamedTemporaryFile(prefix='test_parser', suffix='.tmp', delete=False) as tmp:\n fpath = tmp.name\n fh = open(fpath, *args, **kwargs)\n file_handles.append(fh)\n return fh", "def read_from_file(file_name):\n with open(file_name, \"rb\") as text_file:\n return text_file.read()", "def read_file(file):\n f = open(file, 'r')\n print(f.read())", "def simple_text_reader(text_file):\n with open(text_file, 'rt') as file:\n data = file.read()\n return data", "def get_contents( path, name, verbose = False, get_config=lambda: {} ):\n t_file_fh, t_file_name = tempfile.mkstemp()\n os.close(t_file_fh)\n try:\n fs_mod.fs_get( path+\"/\"+name, t_file_name, get_config )\n except:\n if verbose:\n print(\"get_contents exception:\",traceback.format_exc(), file=sys.stderr)\n return \"\"\n contents = open(t_file_name,\"r\").read()\n os.remove(t_file_name)\n return contents", "def open_file(filename):\n print(\"filename: %s\\n\" % filename)\n if filename:\n return open(filename, \"w\")\n else:\n return tempfile.NamedTemporaryFile(delete=False)", "def read_file(filename):\n f = open(filename)\n contents = f.read()\n f.close()\n return contents", "def getFileContents(filename, mode=\"r\", encoding=None):\n\n with withFileLock(\"reading file %s\" % filename):\n with openTextFile(filename, mode, encoding=encoding) as f:\n return f.read()", "def read(cls, path):\n with cls.open(path, 'rt') as fd:\n return fd.read()", "def read (self, file):\n\t\tself.unpack (file.read (self.size()))", "def read():\n # TODO", "def readFile1():\n global data1\n file1 = open(\"t1.txt\",\"r\")\n data1=file1.read()", "def read_file(self, fname, name):\r\n self.filename = name\r\n if fname != \".\":\r\n self.fname = f\"{fname}\\\\\"\r\n self.pathread = os.path.join(self.p, self.fname)\r\n else:\r\n self.pathread = self.p\r\n try:\r\n self.path = os.path.join(self.pathread, self.filename)\r\n with open(self.path, 'r') as read:\r\n self.data = read.readlines()\r\n except Exception as error:\r\n return error\r\n finally:\r\n send = \" \".join(self.data)\r\n return send", "def read_file(filename):\n if os.path.isfile(filename):\n with open(filename, 'r') 
as f:\n return f.read()" ]
[ "0.7447779", "0.7349098", "0.6541486", "0.65207756", "0.6515084", "0.64710087", "0.6465426", "0.6465426", "0.6429184", "0.6283047", "0.62467045", "0.62438434", "0.6242521", "0.62389153", "0.62389153", "0.6197708", "0.617634", "0.6135359", "0.61328983", "0.60876274", "0.60842836", "0.60427576", "0.6012975", "0.6009691", "0.60034204", "0.598985", "0.5978948", "0.5978948", "0.59507954", "0.5946499", "0.5938786", "0.59282184", "0.592501", "0.59132016", "0.5871316", "0.5846786", "0.5841136", "0.58281195", "0.58275026", "0.58272994", "0.5812052", "0.58074814", "0.5804668", "0.5799681", "0.5796659", "0.5782274", "0.5764329", "0.5760583", "0.5759387", "0.5756233", "0.57470167", "0.5745783", "0.5745727", "0.57424235", "0.57324386", "0.5731832", "0.57109785", "0.5710879", "0.57019037", "0.5696183", "0.5677473", "0.5676199", "0.5675119", "0.56749356", "0.5673839", "0.5670095", "0.56698805", "0.566818", "0.5662238", "0.56614196", "0.5660265", "0.5653889", "0.56447875", "0.56209564", "0.56166726", "0.56140155", "0.5610521", "0.5609368", "0.5608419", "0.56074214", "0.5600455", "0.5600028", "0.55772674", "0.55546564", "0.55536485", "0.55512995", "0.5550532", "0.55473775", "0.55452096", "0.5544777", "0.55441755", "0.5539566", "0.55381763", "0.5537471", "0.5537156", "0.55364484", "0.55345875", "0.55282664", "0.55264246", "0.55244076", "0.55213505" ]
0.0
-1
Take an argument as input from the command line
def f2c_cml_exc_function():
    import sys
    try:
        F = float(sys.argv[1])
        C = 5/9.0*(F - 32)
        print("The temperature in Celsius is {:g}".format(C))
    except:
        print("Format should be {} with a temperature in Fahrenheit"
              .format(sys.argv[0]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cli(args): # noqa; pylint: disable=unused-argument", "def main(args=None):", "def main(args=None):", "def main(args):", "def main(args):", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input\", help=\"Fasta rDNA input\")\n parser.add_argument(\"output\", help=\"GFF annotation\")\n parser.add_argument(\"kingdom\", help=\"Choose kingdom\")\n args = parser.parse_args()\n command(args)", "def main(args):\n cli = CLI()\n # Check arguments\n cli.parse_arguments(args)", "def commandline():\n command_parser = argparse.ArgumentParser(description=__doc__, epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)\n command_parser.add_argument('-i','--input_file', type=str, required=True, help='input file.')\n command_parser.add_argument('-o','--output_file', type=str, required=True, help='output file.')\n args = command_parser.parse_args()\n return args", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv[1:]", "def parse_arguments(args):", "def get_input_file():\n if len(sys.argv) < 3:\n return -1\n return sys.argv[2]", "def cli(**_) -> None:\n pass", "def cmd_appe(args):", "def main():\r\n args = sys.argv\r\n print 'Script:', args[0]\r\n args.pop(0)\r\n for i, argument in enumerate(sys.argv):\r\n print 'Argument {}: {}'.format(i, argument)\r\n print 'Type: {}'.format(type(argument))", "def main(args=None):\n pass", "def usage():\n\n print(\"\\nHere is how you can use this script\\n\")\n print(\"Usage: python %s\"%sys.argv[0])\n print(\"\\t --input=<file>\")", "def main():\n args = parse_args()\n process_args(args)", "def handle_cmdline_args():\n\n parser = argparse.ArgumentParser(\n description='Generate synthetic data from a specification in a json '\n 'file using the \"synth-method\" described in the json file. ')\n\n parser.add_argument(\n '-i', dest='infile', required=True,\n help='The input json file. 
Must contain a \"synth-method\" property')\n\n parser.add_argument(\n '-o', dest='outfile_prefix', required=True, help='The prefix of the output paths (data json and csv), relative to the QUIPP-pipeline root directory')\n\n args = parser.parse_args()\n return args", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv", "def getArg(flag):\n try:\n a = sys.argv[sys.argv.index(flag) + 1]\n except:\n return \"\"\n else:\n return a", "def parse_command_line():\n parser = argparse.ArgumentParser(description='Parses ID\\'s from the DDI compendium search results, and then downloads the html and puts them into a sqlite database.')\n parser.add_argument('-f', '--file', dest='file',\n action='store',\n help='Filenname to be read')\n arg_manager = parser.parse_args()\n return arg_manager", "def get_input():\n parser = argparse.ArgumentParser(description='Parameters')\n parser.add_argument('--host', help='adress of the host')\n parser.add_argument('--port', help='port of IPMI host')\n parser.add_argument('--user', help='user allowed to acces IPMI')\n parser.add_argument('--passwd', help='password for the specific user')\n parser.add_argument('--interval', help='seconds between each data reading')\n parser.add_argument('--nread', help='number of time to collect data')\n parser.add_argument('--store', action='store_true',\n help='save the data collected in a nosql db')\n args = parser.parse_args()\n return args, parser", "def cmd_user(args):", "def getopts():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", type=argparse.FileType('r'),\n required=True, help=\"input file (.csv)\")\n return parser.parse_args()", "def main(argv):\n parsed = parse_args(argv)\n instream = sys.stdin\n name = parsed.name\n if parsed.input_file != \"-\":\n instream = open(parsed.input_file, 'r')\n name = parsed.input_file.split('.')[1]\n print pfm_as_meme_str(parse_scer_pfm(instream, handle_passed=True), name)", "def command_line():\n version = ' '.join([__version__, __build__])\n parser = ArgumentParser(\n prog='moniker',\n description='Simple batch file renaming tool.',\n )\n parser.add_argument(\n '-v', '--version', action='version',\n version=\"%s v%s\" % (basename(sys.argv[0]), version)\n )\n parser.add_argument(\n '--depth',\n type=int,\n default=0,\n metavar='depth',\n help='Tiers of file heiarcy explored',\n )\n parser.add_argument(\n '--replace',\n nargs=2,\n default=('', ''),\n metavar='replace',\n help='glob pattern to match'\n )\n parser.add_argument(\n 'directory',\n default='.',\n help='target directory root',\n )\n return parser", "def get_args():\n if len(sys.argv) == 3:\n return sys.argv[1:]\n print(\"USAGE: python3 extract_cds.py infile outfile\\n\\n\")\n exit()", "def parse_user_input():\n DISC = 'Generate dataset from input files to one csv frame.'\n parser = argparse.ArgumentParser(description=DISC)\n\n # USER ARGS\n parser.add_argument('-raw_dir',\n type=str,\n help='Path to the dir of raw data.',\n required=True\n )\n\n parser.add_argument('-csv_file',\n type=str,\n help='CSV file of the utterances to transform.',\n required=True\n )\n\n parser.add_argument('-feature_dir',\n type=str,\n help='Path to the dir of output feature representations.',\n required=True\n )\n\n parser.add_argument('-feature_type',\n type=str,\n help='Feature representation of the speech signal.',\n required=True\n )\n\n return parser.parse_args()", "def main():\n\tparser = setup_argument_parser()\n\targuments = parser.parse_args()\n\tto_print = arguments.to_print\n\techo(to_print)", "def 
main(argv: Sequence[Text]) -> None:\n\n\n print(\"TODO\")", "def test_atleast_two_arguments_needed_one():\n cli_result = subprocess.run(\n ['kaiba', 'config.js'],\n capture_output=True,\n )\n assert b'the following arguments are required: input' in cli_result.stderr\n assert cli_result.returncode == 2", "def test_dash_arg():\n parser = CmdParser([dashtest])\n out = parser.parse(\"dashtest -name foo\")\n assert out[0].arguments[0].present == True\n assert out[0].arguments[0].value == \"foo\"\n assert out[0].as_shell_string() == \"dashtest -name foo\"", "def _get_input_from_argv():\n payload_index = sys.argv.index('--') + 1\n params = sys.argv[payload_index:]\n if not params:\n raise ValueError(\n \"A JSON payload was expected after the -- delimiter, but none \"\n \"was found.\")\n return ' '.join(params)", "def input_args():\n return filter(lambda x: len(x) > 0,\n map(lambda x: x.strip(), sys.argv[1:]))", "def parse_argument(argument_option):\n index = sys.argv.index(argument_option)\n try:\n argument = sys.argv[index+1]\n except IndexError:\n print('ERROR: Invalid argument!')\n print(__doc__)\n print(unittest.main.__doc__)\n else:\n sys.argv.pop(index)\n sys.argv.pop(index)\n return argument", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('_', nargs='*')\n args = parser.parse_args()\n\n print(\"Arguments: \" + str(args._))\n\n return 0", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # All reference encoders\n parser.add_argument(\"--step\", dest=\"step\", default=\"10\", type=int, help=\"step size\")\n parser.add_argument(\"--repeats\", dest=\"repeats\", type=int, default=1, help=\"repeats\")\n\n parser.add_argument(dest=\"image\", default=None,\n help=\"select the test image to run\")\n\n args = parser.parse_args()\n return args", "def readArgs():\n args = sys.argv\n if len(args) != 3:\n print(\"ERROR - Wrong number of arguments! \\n\")\n print(\"Usage: plotGantt.py TYPE path/to/result/file.gantt \\n where TYPE is : MTS / SCH\")\n exit(5)\n if args[1] != \"MTS\" and args[1] != \"SCH\":\n print(\"ERROR - Wrong type specified! 
: \" + args[1])\n print(\"Usage: plotGantt.py TYPE path/to/result/file.gantt \\n where TYPE is : MTS / SCH\")\n return args", "def cmd_stru(args):", "def main(self):\n cmd = \"self.%s(sys.stdin)\" % sys.argv[1]\n exec(cmd)", "def parse_arg():\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', type=str, default='stylegan', choices=['vanilla', 'stylegan'])\n parser.add_argument('--mode', type=str, default='sample', choices=['sample', 'project', 'draw', 'interpolate'])\n parser.add_argument('--latent', type=str, default='z', choices=['z', 'w', 'w+'])\n parser.add_argument('--n_iters', type=int, default=1000, help=\"number of optimization steps in the image projection\")\n parser.add_argument('--perc_wgt', type=float, default=0., help=\"perc loss lambda\")\n parser.add_argument('--input', type=str, default='data/cat/*.png', help=\"path to the input image\")\n return parser.parse_args()", "def getArgs():\r\n parser = argparse.ArgumentParser(\r\n description = \"\"\"This program uses the validation data and a given model to do brain segmentation that will be sent to FeTs challenge to get evaluated \"\"\")\r\n parser.add_argument(\"-d\", type = str, help = \"d is the path to validation dataset, e.g: C:/Documents/MICCAI_FeTS2021_TrainingData/\")\r\n parser.add_argument(\"-m\", type = str, help = \"m is the path for the model to load, e.g: C:/Documents/MICCAI_FeTS2021_TrainingData/cpt/cpt_0_1\")\r\n parser.add_argument(\"-o\", type = str, help = \"o is the output path, e.g: C:/Documents/inferences\")\r\n # Get your arguments\r\n return parser.parse_args()", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"text\",\n help=\"text to convert (default: read from stdin)\",\n nargs=\"?\",\n default=sys.stdin,\n )\n parser.add_argument(\n \"-u\", \"--uppercase\", help=\"convert to uppercase\", action=\"store_true\"\n )\n parser.add_argument(\n \"-s\", \"--spaces\", help=\"add spaces between characters\", action=\"store_true\"\n )\n args = parser.parse_args()\n\n text = read_input(args, parser)\n processed_text = process_text(text, args)\n print(processed_text)", "def main():\n try:\n string = sys.argv[1]\n substring = sys.argv[2]\n\n except IndexError:\n string = None\n substring = None\n\n try:\n sys.argv[3]\n\n except IndexError:\n pass\n\n else:\n print(\" More than expected Number of Arguments\")\n string = None\n substring = None\n\n RabinKarp(string, substring)", "def main_parse_args():\n parser = ArgumentParser()\n parser.add_argument('infile', help='path to the file to be mapped.It should\\\n contain one identifer on each line.')\n parser.add_argument('-rh', '--redis_host', default=DEFAULT_REDIS_URL,\n help='url of Redis db')\n parser.add_argument('-rp', '--redis_port', default=DEFAULT_REDIS_PORT,\n help='port for Redis db')\n parser.add_argument('-rps', '--redis_pass', default=DEFAULT_REDIS_PASS,\n help='password for Redis db')\n parser.add_argument('-of', '--outfile', default=None,\n help='path to the output file')\n parser.add_argument('-sh', '--source_hint', help='suggestion for ID source \\\n database used to resolve ambiguities in mapping',\n default=DEFAULT_HINT)\n parser.add_argument('-t', '--taxon', help='taxon id of species of all gene \\\n names', default=DEFAULT_TAXON)\n myargs = parser.parse_args()\n return myargs", "def cli() -> object:\n parser = argparse.ArgumentParser(description=\"Expression Compiler\")\n parser.add_argument(\"sourcefile\", type=argparse.FileType('r'),\n help=\"Source program text\")\n parser.add_argument(\"outfile\", 
type=argparse.FileType('w'),\n nargs=\"?\", default=sys.stdout,\n help=\"Output file for assembly code\")\n args = parser.parse_args()\n return args", "def parse_cmdline():\n\tparser = ArgumentParser(prog=\"FastP_QC.py\", description=\"\"\"Script collects stats from fastp jsons.\"\"\")\n\tparser.add_argument(\"-r1\", \"--r1_stats\", dest=\"r1_stats\", action=\"store\", required=True, help=\"Text file with r1 stats, from q30.py script.\")\n\tparser.add_argument(\"-r2\", \"--r2_stats\", dest=\"r2_stats\", action=\"store\", required=True, help=\"Text file with r2 stats, from q30.py script.\")\n\tparser.add_argument(\"-n\", \"--name\", dest=\"name\", action=\"store\", required=True, help=\"Sample name\")\n\targs = parser.parse_args()\n\treturn args", "def command_line_arguments():\n\n try:\n parser = argparse.ArgumentParser(description='Log Handler/Cleaner/Copier for Idemia DocAuth')\n\n # Add required arguments.\n parser.add_argument('action', choices=['clean', 'download'], type=str, help='clean or download')\n\n # Parse the arguments\n args = parser.parse_args()\n\n return args\n\n except Exception as err:\n print(err)\n return", "def _parse_args():\n parser = argparse.ArgumentParser(description='Run DAFI.')\n parser.add_argument('input_file', help='Name (path) of input file')\n return parser.parse_args()", "def cli() -> None:", "def cli() -> None:", "def argument_parser():\n parser = argparse.ArgumentParser(\n description=\"Validates that the HRC files are correct in a Potree Octtree\")\n parser.add_argument('-i','--input',default='',help='Input folder with the Potree Octtree',type=str, required=True)\n return parser", "def hotfix_deepobs_argparse():\n sys.argv = sys.argv[:1]", "def cli():\n parser = argparse.ArgumentParser()\n # add arguments\n parser.add_argument('image_path', type = str, default = './flowers/test/1/image_06743.jpg', help ='Directory of Image of testing')\n parser.add_argument('checkpoint', type = str, default = 'checkpoint.pth', help ='Directory to save checkpoints')\n parser.add_argument('--top_k', action = 'store', dest = 'top_k', type = int, default = 5)\n parser.add_argument('--category_names', action='store', dest='category_names', type=str, default='cat_to_name.json')\n parser.add_argument('--gpu', action = 'store', default = False, help = 'GPU mode')\n return parser.parse_args()", "def get_args():\n parser = argparse.ArgumentParser(description='\"Some Input argu for Generate Fragsize Distribution.')\n parser.add_argument('-i', '--input', required=True, action='store',\n help=\"Input File of .molucule.table\")\n parser.add_argument('-o', '--output', required=True, action='store',\n help=\"Output File of .fragsize_distribution.txt\")\n \n return parser.parse_args()", "def get_commandline_argument(argument: str, argument_list: list) -> str:\n length = len(argument_list)\n if length == 0:\n print('get_commandline_argument(): Error, empty argument_list passed, exiting.')\n exit(1)\n\n if str(argument) == '':\n # No argument passed.\n return ''\n\n if length == 1:\n # The argument list contains the script name only.\n return ''\n\n for i in range(1, length - 1):\n if str(argument_list[i]) == str(argument):\n if i + 1 <= length:\n # Only get the next index if we are still in the array bounds.\n return str(argument_list[i + 1])\n return ''", "def handle_input():\n\n command = None\n\n while command != \"quit\":\n input_string = raw_input(\"HBA Database> \")\n tokens = input_string.split('|')\n command = tokens[0]\n args = tokens[1:]\n\n if command == \"student\":\n github 
= args[0] #works\n get_student_by_github(github)\n\n elif command == \"new_student\":\n first_name, last_name, github = args[:3] # unpack!\n make_new_student(first_name, last_name, github)\n\n elif command == \"get_project_by_title\":\n title = args[0] #works\n get_project_by_title(title)\n\n elif command == \"get_grade_by_github_title\":\n github, title = args[:2]\n get_grade_by_github_title(github, title)\n\n elif command == \"assign_grade\":\n github, title, grade = args[:3]\n assign_grade(github, title, grade)\n\n elif command == \"add_project\":\n title, description, max_grade = args[:3]\n add_project(title, description, max_grade)\n\n elif command == \"get_grade_by_student\":\n first_name = args[0] #doesn't work\n get_grade_by_student(first_name)", "def segment_from_command_line(args):\n\n input_file = BedTool(args.input)\n # Segment the input file\n return segment(input_file, args.method, p0=args.p0, prior=args.prior)", "def specify_parser():\n parser = ArgumentParser()\n\n parser.add_argument('input', nargs='?', type=FileType('r'), default=sys.stdin)\n parser.add_argument('--datafile', dest=\"datafile\", required=True, nargs=1, type=FileType('r'))\n return parser", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"print random line.\", epilog=\"Epilog of program.\", add_help=True\n )\n\n # optional input\n parser.add_argument(\n \"filename\",\n nargs=\"?\",\n help=\"filename to print random line from\",\n type=argparse.FileType(\"r\"),\n default=sys.stdin,\n )\n\n return parser.parse_args()", "def __main__():\n parser = argparse.ArgumentParser(description='Facebook ID Finder')\n parser.add_argument('--username', '-u', dest='username', help='Username to search for')\n parser.add_argument('--version', '-v', action='version', version='%(prog)s 0.1')\n args = parser.parse_args()\n ausername = args.username\n\n if not args.username:\n sys.exit(parser.print_help())\n else:\n search_username(ausername)", "def main():\n parser = argparse.ArgumentParser(description=\"\"\"Tester for YT Data API and different inputs\"\"\")\n parser.add_argument('-a', '--analytics', help='Performs a basic analytics lookup for the user\\'s channel entered')\n parser.add_argument('-c', '--comments', help='Performs a lookup of comments for the video id entered')\n args = parser.parse_args()\n\n if args.analytics:\n analytics = args.analytics\n analyt(analytics)\n\n if args.comments:\n comments = args.comments\n get_comments(comments)", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def main():\r\n\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(dest='INFILE')\r\n parser.add_argument(dest='OUTFILE')\r\n\r\n args = parser.parse_args()\r\n function1(args.INFILE, args.OUTFILE)", "def cmdline_main():\r\n import sys\r\n if (len(sys.argv) < 2 or len(sys.argv) > 4 or \"--help\" in sys.argv or\r\n \"-h\" in sys.argv or sys.argv[1] not in (\"-c\", \"-d\")):\r\n print(\"Usage: python -m snappy <-c/-d> [src [dst]]\")\r\n print(\" -c compress\")\r\n print(\" -d decompress\")\r\n print(\"output is stdout if dst is omitted or '-'\")\r\n print(\"input is stdin if src and dst are omitted or src is '-'.\")\r\n sys.exit(1)\r\n\r\n if len(sys.argv) >= 4 and 
sys.argv[3] != \"-\":\r\n dst = open(sys.argv[3], \"wb\")\r\n elif hasattr(sys.stdout, 'buffer'):\r\n dst = sys.stdout.buffer\r\n else:\r\n dst = sys.stdout\r\n\r\n if len(sys.argv) >= 3 and sys.argv[2] != \"-\":\r\n src = open(sys.argv[2], \"rb\")\r\n elif hasattr(sys.stdin, \"buffer\"):\r\n src = sys.stdin.buffer\r\n else:\r\n src = sys.stdin\r\n\r\n if sys.argv[1] == \"-c\":\r\n method = stream_compress\r\n else:\r\n method = stream_decompress\r\n\r\n method(src, dst)", "def call_prog(args):\n # Just dump the entirety of the command so that\n # the user can specify whatever arguments they want\n call(args)" ]
[ "0.68680656", "0.67012274", "0.67012274", "0.66923463", "0.66923463", "0.66071945", "0.64984864", "0.6440137", "0.63924545", "0.6384571", "0.6353774", "0.63000405", "0.6298342", "0.62937367", "0.6274231", "0.6258619", "0.6252007", "0.62427706", "0.621087", "0.61711746", "0.6156566", "0.6146984", "0.61303604", "0.61265117", "0.6122675", "0.6121437", "0.61117035", "0.6109215", "0.61077636", "0.60983336", "0.6073003", "0.6067238", "0.60483104", "0.60306627", "0.60261804", "0.6015566", "0.6013403", "0.6013403", "0.6013403", "0.6013403", "0.6013403", "0.6013403", "0.6013403", "0.6013403", "0.6007978", "0.59921324", "0.59920096", "0.5983493", "0.5970742", "0.59643275", "0.5955502", "0.59530073", "0.5940436", "0.5934396", "0.5932683", "0.59171677", "0.5890722", "0.5877369", "0.5877369", "0.58761305", "0.5874561", "0.58724195", "0.5867481", "0.5858614", "0.58585525", "0.58540946", "0.58470535", "0.584642", "0.5846215", "0.58458036", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.58416975", "0.5841326", "0.58406276", "0.5824756" ]
0.0
-1
Newton's second law of motion for measuring stopping distance. From Newton's second law of motion, the stopping distance of an object in motion, like a car, can be found as d = (1/2)(v0^2/(mu*g)). The friction coefficient mu measures how slick a road is, with a default of 0.3.
def stopping_length_function(initial_velocity=120, friction_coefficient=0.3):
    g = 9.81
    v0 = initial_velocity/3.6
    mu = friction_coefficient
    return (1/2)*(v0**2/(mu*g))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_force_from_damping(v, damping, masses):\n F = masses*damping*np.diff(v, 0)\n\n return F", "def duty_cycle_by_force(newton: float, profile: GripForceProfile) -> float:\n if profile.min <= newton <= profile.max:\n return sum(ele[1] * (newton ** ele[0]) for ele in profile.polynomial)\n else:\n raise ValueError(\"Gripper force out of bounds\")", "def friction_factor(v1: \"int\", v2: \"int\") -> \"int\":", "def friction_model():\n return TimeWeakening()", "def work_dos():\n #potential = 2x**2+x**2y+y**2\n x1,y1 = (2, -3)\n x2,y2 = (-1, 2)\n p1 = (2*(x1**2)) + ((x1**2)*y1) + (y1**2)\n p2 = (2*(x2**2)) + ((x2**2)*y2) + (y2**2)\n sol = p1 - p2\n sol = abs(sol)\n print(f'The vector field F=(4x+2xy,x2+2y) \\n'\n 'along the curve C parametrized by r(t)=(3t−1,−5t+2) \\n '\n f'for 0 ≤ t ≤ 1 is: {sol}')", "def friction_factor_2(v1: \"int\", v2: \"int\") -> \"int\":", "def my_Newton( fct, df_dx, x0):\r\n xn = float(x0)\r\n eps = 1e-5\r\n N = 20\r\n i = 0\r\n while abs( fct( xn**(i + 1)) - fct( xn**i)) > eps and i < N:\r\n x_next = xn - fct(xn)/df_dx(xn)\r\n print( i, 'fct value', abs( fct(xn)), x_next)\r\n xn = x_next\r\n i += 1\r\n if abs( fct( xn)) < eps:\r\n return x_next\r\n else: #solution did not converge\r\n return np.nan", "def speed(v, t):\n rf = v.orbit.body.non_rotating_reference_frame\n vec = v3minus(v.velocity(rf), t.velocity(rf))\n a = vec[0] * vec[0]\n b = vec[1] * vec[1]\n c = vec[2] * vec[2]\n return math.sqrt(a + b + c)", "def car_dynamics(self,x, t, u, p):\n # f = vehicle_dynamics_ks(x, u, p)\n f = vehicle_dynamics_st(x, u, p)\n # f = vehicle_dynamics_std(x, u, p)\n # f = vehicle_dynamics_mb(x, u, p)\n return f", "def FO2(lam):\n return 1.096 + 1.385 *1e-3 *lam**(-2) + 1.448 *1e-4 *lam**(-4)", "def calculate_forces(v0, mu, density_m, CD, diameter_b, \\\n area_b, volume_b, density_b, \\\n dt, T):\n \n # Gravitational const. 
m/s^2\n g = 9.81 \n # Proportionality constant for\n # Reynolds number\n Re_const = diameter_b*density_m/mu\n \n a_s = 3*math.pi*diameter_b*mu/(density_b*volume_b)\n a_q = 0.5*CD*density_m*area_b/(density_b*volume_b)\n b = g*(density_m/density_b - 1.0)\n \n # Numerical solution gives velocity as \n # a function of time.\n v, t = vm.solver(v0, a_s, a_q, b, Re_const, T, dt) \n\n # Initialize vectors\n Fg = zeros(len(v))\n Fb = zeros(len(v))\n Fd = zeros(len(v))\n\n # Loop over time steps\n for n in range(0, len(v)):\n # Evaluate Reynolds number\n Re = Re_const*v[n] \n \n # Gravity force\n Fg[n] = -density_b*volume_b*g\n # Bouyancy force\n Fb[n] = density_m*g*volume_b\n \n # Drag force\n if abs(Re) < 1:\n # If Re < 1, use Stokes' drag force \n Fd[n] = -3.0*math.pi*diameter_b*mu*v[n]\n else:\n # If Re >= 1, use the quadratic\n # drag force\n Fd[n] = -0.5*CD*density_m*area_b*abs(v[n])*v[n]\n\n \n return Fg, Fb, Fd, t", "def eval_dryfriction():\n # Environment\n env = WAMBallInCupSim(num_dof=7, max_steps=1500)\n\n # Policy (random init)\n policy_hparam = dict(num_feat_per_dim=12, bounds=(np.array([0.0]), np.array([1.0])))\n policy = DualRBFLinearPolicy(env.spec, policy_hparam, dim_mask=2)\n\n # Do the rolllouts\n t_all = []\n qpos_all = []\n dp_vals = [0.0, 0.3, 0.6, 0.9, 1.2]\n print_cbt(f\"Run policy for stiction coefficients: {dp_vals}\")\n for dpv in dp_vals:\n env.reset(\n domain_param=dict(\n joint_1_dryfriction=dpv,\n joint_2_dryfriction=dpv,\n joint_3_dryfriction=dpv,\n joint_4_dryfriction=dpv,\n joint_5_dryfriction=dpv,\n joint_6_dryfriction=dpv,\n joint_7_dryfriction=dpv,\n )\n )\n ro = rollout(env, policy, render_mode=RenderMode(video=False), eval=True)\n t_all.append(ro.time[:-1])\n qpos_all.append(ro.env_infos[\"qpos\"])\n\n # Plot\n fig, ax = plt.subplots(nrows=env.num_dof, sharex=\"all\", figsize=(16, 7))\n for i, idx_joint in enumerate([dof for dof in range(env.num_dof)]):\n ax[i].set_prop_cycle(color=plt.get_cmap(\"cividis\")(np.linspace(0, 1, env.num_dof)))\n ax[i].set_ylabel(f\"joint {idx_joint+1} pos [rad]\")\n for j in range(len(dp_vals)):\n ax[i].plot(t_all[j], qpos_all[j][:, idx_joint], ls=\"--\", label=f\"s = {dp_vals[j]}\")\n if i == 0:\n ax[i].legend(ncol=len(dp_vals))\n ax[-1].set_xlabel(\"time [s]\")\n plt.suptitle(\"Evaluation of joint stiction coefficients\")\n plt.show()", "def Delta(z):\n return (18*np.pi**2 - 82*cosmology.Ode(z) - 39*cosmology.Ode(z)**2) / cosmology.Om(z)", "def my_Newton(fct, df_dx, x0):\r\n xn = float(x0)\r\n eps = 1e-5\r\n N = 20\r\n i = 0\r\n while abs(fct (xn)) > eps and i < N:\r\n x_next = xn - fct(xn)/df_dx(xn)\r\n print(i , 'fct_value', abs(fct(xn)), x_next)\r\n xn = x_next\r\n i += 1\r\n if abs(fct(xn)) < eps:\r\n return x_next\r\n else: #solution did not converge\r\n return np.nan", "def _forces_moments(self, delta):\n # assert delta.shape == (4,1)\n da = delta[0]\n de = delta[1]\n dt = delta[2]\n dr = delta[3]\n\n e0 = self._state[3]\n e1 = self._state[4]\n e2 = self._state[5]\n e3 = self._state[6]\n u = self._state[7]\n v = self._state[8]\n w = self._state[9]\n p = self._state[10]\n q = self._state[11]\n r = self._state[12]\n\n self._Va = np.sqrt(u**2 + v**2 + w**2)\n self._alpha = np.arctan(1.0*w/u)\n self._beta = np.arcsin(1.0*v/self._Va)\n\n\n\n Fg = self.mass*self.gravity*np.array([2*(e1*e3-e2*e0),\n 2*(e2*e3 + e1*e0),\n e3**2 + e0**2 - e1**2 - e2**2,\n ])\n\n # Fg = self.mass*self.gravity*np.array([2*(e1*e3 - e2*e0),\n # 2*(e2*e3 + e1*e0),\n # e3**2 + e0**2 - e1**2 - e2**2,\n # ])\n\n M_e = 25\n sig = lambda a: 
(1+np.exp(-M_e*(a-self.alpha0))+np.exp(M_e*(a+self.alpha0)))/((1+np.exp(-M_e*(a-self.alpha0)))*(1+np.exp(M_e*(a+self.alpha0))))\n cla = lambda a: (1-sig(a))*(self.C_L_0+self.C_L_alpha*a)+sig(a)*(2*np.sign(a)*np.sin(a)**2*np.cos(a))\n cda = lambda a: self.C_D_p + (self.C_L_0+self.C_L_alpha*a)**2/(np.pi*self.e*self.AR)\n\n cxa = lambda a: -(cda(a)) * np.cos(a) + (cla(a)) * np.sin(a)\n\n cxq = lambda a: -self.C_D_q * np.cos(a) +self.C_L_q * np.sin(a)\n\n cxde = lambda a: -self.C_D_delta_e * np.cos(a) + self.C_L_delta_e * np.sin(a)\n\n cza = lambda a: -(cda(a)) * np.sin(a) - (cla(a)) * np.cos(a)\n\n czq = lambda a: -self.C_D_q * np.sin(a) - self.C_L_q * np.cos(a)\n\n czde = lambda a: -self.C_D_delta_e * np.sin(a) - self.C_L_delta_e * np.cos(a)\n\n c = self.c/(2.0*self._Va)\n b = self.b/(2.0*self._Va)\n\n\n\n one = 0.5*self.rho*self._Va**2*self.S_wing\n # two = np.array([[1,0,0],[0,1,0],[0,0,1]])\n three = np.array([[cxa(self._alpha)+cxq(self._alpha)*c*q+cxde(self._alpha)*de],\n [self.C_Y_0+self.C_Y_beta*self._beta+self.C_Y_p*b*p+self.C_Y_r*b*r+self.C_Y_delta_a*da+self.C_Y_delta_r*dr],\n [cza(self._alpha)+czq(self._alpha)*c*q+czde(self._alpha)*de]])\n\n Fa = np.squeeze(three) * one\n # pdb.set_trace()\n Fa = Fa.reshape((3,-1))\n\n F = Fg + Fa\n #\n # print(\"Fa:\",Fa)\n\n Fp = 0.5*self.rho*self.S_prop*self.C_prop*((self.k_motor*dt)**2-self._Va**2)\n\n # print(\"FP:\", Fp)\n\n fx = F[0] + Fp\n # + 0.5*MAV.rho*self._Va**2*MAV.S_wing*(\\\n # +cxa(self._alpha)\\\n # + cxq(self._alpha)*c*q\\\n # + cxde(self._alpha)*de\n # )\n\n fy = F[1]\n fz = F[2]\n\n # Moment time!!!\n one = 0.5*self.rho*self._Va**2*self.S_wing\n two = np.array([\\\n [self.b*(self.C_ell_0+self.C_ell_beta*self._beta+self.C_ell_p*b*p+self.C_ell_r*b*r+self.C_ell_delta_a*da+self.C_ell_delta_r*dr)],\n [self.c*(self.C_m_0+(self.C_m_alpha*self._alpha)+(self.C_m_q*c*q)+(self.C_m_delta_e*de))],\n [self.b*(self.C_n_0+(self.C_n_beta*self._beta)+(self.C_n_p*b*p)+(self.C_n_r*b*r)+(self.C_n_delta_a*da)+(self.C_n_delta_r*dr))]\n ])\n Ma = one * np.squeeze(two)\n # print(\"\\nMa:\", Ma)\n # pdb.set_trace()\n Ma = Ma.reshape((3,-1))\n\n size = Ma.shape[1]\n\n Mp = np.block([[np.ones(size)*-self.kTp*(self.kOmega*dt)**2],\n [np.zeros(size)],\n [np.zeros(size)]\n ])\n\n M = Mp + Ma\n\n Mx = M[0]\n My = M[1]\n Mz = M[2]\n\n # self._forces[0] = fx\n # self._forces[1] = fy\n # self._forces[2] = fz\n # pdb.set_trace()\n # print(fx, fy, fz, Mx, My, Mz)\n\n return np.array([fx, fy, fz, Mx, My, Mz])", "def driftRHS(field,drift_velocity,t,x):\n f = field.getValue(x)\n fs = np.linalg.norm(f)\n f = f/fs\n return -f*drift_velocity(fs)", "def diffusion_spherical_FV(c, t, r, R, D, j0):\n\n # Compute spacing\n dr = r[1]-r[0]\n\n # Evaluate j\n j = current(t, j0)\n\n # Set maximum concentration\n\n # Compute fluxes\n q = - D*r[1:-1] ** 2. * (c[1:] - c[0:-1]) / dr\n q_surf = -j*R**2\n\n # Append boundary conditions\n q = np.append(0, q)\n q = np.append(q, q_surf)\n\n # Compute discretised dc/dt\n dcdt_out = - (2. / (r[1:] + r[0:-1])) ** 2. 
\\\n * (q[1:] - q[0:-1]) / dr\n\n return dcdt_out", "def newton_decent_directions(function, func_derivative, func_hessian, xk, A, P, b, q, t):\r\n # calculate steepest decent direction\r\n newton_dir = -np.dot(np.linalg.inv(func_hessian(x=xk, A=A, P=P, b=b, q=q, t=t)), func_derivative(x=xk, A=A, P=P, b=b, q=q, t=t))\r\n\r\n return newton_dir", "def vel(z, c = cp.cc.c_light_cm_s/1e5):\n # return z*c/(1+z)\n return c*((1+z)**2-1)/((1+z)**2+1)", "def f(z):\n omega_m = 0.308\n omega_de = 0.692\n #omega = omega_m*(1+z)**3\n #return omega**0.6 + omega_de/70*(1+omega/2) # Dodelson approx\n\n omega = omega_m*(1+z)**3*H(0)**2/H(z)**2\n omega_de = omega_de*H(0)**2/H(z)**2\n return omega**(4/7) + omega_de/70*(1+omega/2) # Dodelson approx\n #return 5*omega/(2*(omega**(4/7) - omega_de + (1 + omega/2)*(1 + omega_de/70)))\n #return omega**0.55", "def get_fde(forecasted_trajectory, gt_trajectory) -> float:\n fde = torch.sqrt(\n (forecasted_trajectory[:,-1, 0] - gt_trajectory[:,-1, 0]) ** 2\n + (forecasted_trajectory[:,-1, 1] - gt_trajectory[:,-1, 1]) ** 2\n )\n return fde.mean()", "def simpson2(func, start, stop):\n return (func(start) + 3*func((2*start+stop)/3) + 3*func((start+2*stop)/3) + func(stop)) * (stop-start)/8", "def force ( r ):\n \n assert r.shape == (n,3), 'Incorrect shape of r'\n\n d = np.zeros_like(r) # Create d vectors (bonds)\n d[1:n,:] = r[1:n,:] - r[0:n-1,:] # Compute d vectors (zero index not used)\n\n # Store C coefficients in a matrix\n # In the general case we would not need to calculate every pair\n # and also we would make use of the symmetry cc[a,b]=cc[b,a]\n cc = np.zeros((n,n),dtype=np.float_) # Create C array (scalar products)\n for a in range(1,n):\n for b in range(1,n):\n cc[a,b]=np.dot(d[a,:],d[b,:]) # Compute C array (zero indices not used)\n\n a = n-1 # For this test there is just one angle\n\n # Here is the potential as a function of cos(theta)\n # For testing we use the simplest form: v= -cos(theta)\n # The notation matches that used in the appendix\n\n prefac = 1.0 / np.sqrt(cc[a,a]*cc[a-1,a-1])\n fac = cc[a,a-1]\n pot = -prefac*fac # This is -cos(theta)\n\n # Here we include the derivative of the potential with respect to cos(theta) in the prefactor\n # For this simple case it is -1, so the forces are simply gradients of cos(theta) as in the text\n f = np.empty_like(r) # Create force array\n fac1 = fac / cc[a,a]\n fac2 = fac / cc[a-1,a-1]\n f[a,:] = -prefac * ( fac1*d[a,:] - d[a-1,:] )\n f[a-1,:] = prefac * ( fac1*d[a,:] - fac2*d[a-1,:] + d[a,:] - d[a-1,:] )\n f[a-2,:] = prefac * ( fac2*d[a-1,:] - d[a,:] )\n\n return pot, f", "def finite_diff(F, x0, v0, dt, M, K, C, T):\r\n\r\n ### INITIAL PARAMETERS ####\r\n\r\n # defining the number of steps of analysis = Ns\r\n Ns = int(T/dt)+1\r\n # step t0 (initial acceleration)\r\n ngl = np.shape(F)[0] # captures the number of degrees of freedom\r\n\r\n ### MODELLING THE DISPLACEMENTS ###\r\n\r\n x_before = np.zeros((ngl,1))\r\n # matrix that indicates the displacements, in each degree of freedom, along the time of \r\n # duration of analysis. 
Each column is a time step\r\n x = np.zeros((ngl, Ns))\r\n x[:,0] = x0[:,0]\r\n\r\n ### SOLVING INITIAL STEP ###\r\n\r\n # initial Force F0 is equivalent to the first column of the matrix of load vectors F along time\r\n aux1 = np.zeros((ngl,1))\r\n aux1[:,0] = np.copy(F[:,0])\r\n aux2 = aux1 - np.dot(C,v0) - np.dot(K,x0)\r\n a0 = np.dot(la.inv(M),aux2)\r\n # step t-1 (before initial condition)\r\n x_before = dt*dt*a0/2 - dt*v0 + x0 \r\n # step t+1 (after initial condition)\r\n C1 = M / (dt*dt) + C / (2*dt)\r\n C2 = K - 2*M / (dt*dt)\r\n C3 = M / (dt*dt) - C / (2*dt)\r\n aux3 = aux1 - np.dot(C2, x0) - np.dot(C3, x_before)\r\n x[:,1] = np.dot(la.inv(C1), aux3[:,0])\r\n\r\n ### INTEGRATING ALONG THE DURATION OS ANALYSIS ###\r\n\r\n i = 0\r\n aux4 = np.zeros((ngl,1))\r\n aux5 = np.zeros((ngl,1))\r\n aux6 = np.zeros((ngl,1))\r\n aux7 = np.zeros((ngl,1))\r\n for i in range(1,Ns-1):\r\n aux4[:,0] = np.copy(F[:,i])\r\n aux5[:,0] = np.copy(x[:,i])\r\n aux6[:,0] = np.copy(x[:,i-1])\r\n aux7[:,0] = np.copy(x[:,i+1])\r\n aux7 = np.dot(la.inv(C1), aux4 - np.dot(C2,aux5) - np.dot(C3,aux6))\r\n x[:,i+1] = np.copy(aux7[:,0])\r\n return x", "def driftRHS_3D(field,drift_velocity,t,x):\n f = field.getValue(x)\n fs = np.sqrt(f[0]**2 + f[1]**2 + f[2]**2)\n f = f/fs\n return -f*drift_velocity(fs)", "def newton_update(f, df):\n def update(x):\n return x - f(x) / df(x)\n return update", "def derivative(r, t, G=6.67e-11, AU=1.496e+11,\n m1=5.972e+24, m2=6.417e+23, m3=1.989e+30,\n a1=1.0*1.496e+11, a2=1.52*1.496e+11):\n\n if G < 0:\n print(f\"The gravitational constant is negative\")\n\n if AU < 0:\n print(f\"The Astronomical unit is negative\")\n\n if m1 < 0:\n print(f\"The mass of the first body is negative\")\n\n if m2 < 0:\n print(f\"The mass of the second body is negative\")\n\n if m3 < 0:\n print(f\"The mass of the third body is negative\")\n\n if a1 < 0:\n print(f\"The distance of body 1 from the body center is negative\")\n\n if a2 < 0:\n print(f\"The distance of body 2 from the body center is negative\")\n\n x1 = r[0]\n y1 = r[1]\n v_x1 = r[2]\n v_y1 = r[3]\n\n x2 = r[4]\n y2 = r[5]\n v_x2 = r[6]\n v_y2 = r[7]\n\n x3 = r[8]\n y3 = r[9]\n v_x3 = r[10]\n v_y3 = r[11]\n\n z1 = r[12]\n z2 = r[13]\n z3 = r[14]\n\n v_z1 = r[15]\n v_z2 = r[16]\n v_z3 = r[17]\n\n r1 = np.array([x1, y1, z1])\n r2 = np.array([x2, y2, z2])\n r3 = np.array([x3, y3, z3])\n\n dr1 = v_x1\n dr2 = v_y1\n\n dr3 = (G*m2/distance(r1, r2)**3)*(x2-x1) + (G*m3/distance(r1, r3)**3)*(x3-x1)\n dr4 = (G*m2/distance(r1, r2)**3)*(y2-y1) + (G*m3/distance(r1, r3)**3)*(y3-y1)\n\n dr5 = v_x2\n dr6 = v_y2\n\n dr7 = (G*m1/distance(r1, r2)**3)*(x1-x2) + (G*m3/distance(r2, r3)**3)*(x3-x2)\n dr8 = (G*m1/distance(r1, r2)**3)*(y1-y2) + (G*m3/distance(r2, r3)**3)*(y3-y2)\n\n dr9 = v_x3\n dr10 = v_y3\n\n dr11 = (G*m1/distance(r1, r3)**3)*(x1-x3) + (G*m2/distance(r2, r3)**3)*(x2-x3)\n dr12 = (G*m1/distance(r1, r3)**3)*(y1-y3) + (G*m2/distance(r2, r3)**3)*(y2-y3)\n\n dr13 = v_z1\n dr14 = v_z2\n dr15 = v_z3\n\n dr16 = (G*m2/distance(r1, r2)**3)*(z2-z2) + (G*m3/distance(r1, r3)**3)*(z3-z1)\n dr17 = (G*m3/distance(r2, r3)**3)*(z1-z2) + (G*m1/distance(r2, r1)**3)*(z1-z2)\n dr18 = (G*m1/distance(r1, r3)**3)*(z1-z3) + (G*m2/distance(r2, r3)**3)*(z2-z3)\n\n dr = np.array([dr1, dr2, dr3, dr4, dr5, dr6,\n dr7, dr8, dr9, dr10, dr11, dr12,\n dr13, dr14, dr15, dr16, dr17, dr18])\n\n return dr", "def settling_velocity(self, evaporation_factor: float=0.3) -> _VectorisedFloat:\n if self.diameter is None:\n return 1.88e-4\n else:\n return 1.88e-4 * 
(self.diameter*evaporation_factor / 2.5)**2", "def epsilon_delta(self):", "def particle_velocityV(V,F,dt,Rv,sigma,epsilon,D,N): \n V += dt/2*(particle_forceV(Rv[-1], N, sigma, epsilon, D) + particle_forceV(Rv[-2], N, sigma, epsilon, D))\n return V", "def step(self, f):\n\n NVTBerendsen.scale_velocities(self)\n self.scale_positions_and_cell()\n\n #one step velocity verlet\n atoms = self.atoms\n p = self.atoms.get_momenta()\n p += 0.5 * self.dt * f\n\n if self.fixcm:\n # calculate the center of mass\n # momentum and subtract it\n psum = p.sum(axis=0) / float(len(p))\n p = p - psum\n\n self.atoms.set_positions(self.atoms.get_positions() +\n self.dt * p / self.atoms.get_masses()[:,np.newaxis])\n\n # We need to store the momenta on the atoms before calculating\n # the forces, as in a parallel Asap calculation atoms may\n # migrate during force calculations, and the momenta need to\n # migrate along with the atoms. For the same reason, we\n # cannot use self.masses in the line above.\n\n self.atoms.set_momenta(p)\n f = self.atoms.get_forces()\n atoms.set_momenta(self.atoms.get_momenta() + 0.5 * self.dt * f)\n\n\n return f", "def Leapfrog2D(p1, v1, p2, v2, dt, energy, sigma):\n a = compute_acceleration(energy, sigma, p2-p1)\n pn1 = p1 + v1*dt + 0.5 * a * dt**2\n pn2 = p2 + v2*dt - 0.5 * a * dt**2\n a2 = compute_acceleration(energy, sigma, pn2-pn1)\n vn1 = v1 + 0.5 * (a+a2) * dt\n vn2 = v2 - 0.5 * (a+a2) * dt\n return pn1, vn1, pn2, vn2", "def discharge_coefficient(self) -> _VectorisedFloat:\n return 0.6", "def cost(self, dgvel):\n J_ = self._controlled_frame.jacobian[3:6,:]\n J = param(value=matrix(J_))\n dJ = self._controlled_frame.djacobian[3:6,:]\n gvel = self._world.gvel\n Pdes = self._target_frame.pose[0:3,3]\n cf = self._controlled_frame\n dVdes = 10.*dot(cf.pose[0:3,0:3].T, Pdes - cf.pose[0:3,3]) -\\\n 2.*sqrt(10.)*dot(J_, self._world.gvel)\n return norm2(J*dgvel + param(value=matrix(dot(dJ, gvel) - dVdes)))", "def define_ufl_equations_diff(self):\n\n # Derivatives of velocity integration equation.\n if self.f1 != 0:\n self.df1_du = dlf.derivative(self.f1, self.displacement, self.trial_vector)\n self.df1_dv = dlf.derivative(self.f1, self.velocity, self.trial_vector)\n else:\n self.df1_du = 0\n self.df1_dv = 0\n self.df1_dp = 0 # This is always zero.\n\n # Derivatives of momentum equation.\n if self.displacement != 0:\n self.df2_du = dlf.derivative(self.f2, self.displacement, self.trial_vector)\n else:\n self.df2_du = 0\n\n if self.velocity != 0:\n self.df2_dv = dlf.derivative(self.f2, self.velocity, self.trial_vector)\n else:\n self.df2_dv = 0\n\n if self.pressure != 0:\n self.df2_dp = dlf.derivative(self.f2, self.pressure, self.trial_scalar)\n else:\n self.df2_dp = 0\n\n # Derivatives of incompressibility equation.\n if self.f3 != 0:\n if self.displacement != 0:\n self.df3_du = dlf.derivative(self.f3, self.displacement, self.trial_vector)\n else:\n self.df3_du = 0\n\n if self.velocity != 0:\n self.df3_dv = dlf.derivative(self.f3, self.velocity, self.trial_vector)\n else:\n self.df3_dv = 0\n\n self.df3_dp = dlf.derivative(self.f3, self.pressure, self.trial_scalar)\n else:\n self.df3_du = 0\n self.df3_dv = 0\n self.df3_dp = 0\n\n return None", "def compute_wind_speed(wind_apparent_speed, wind_apparent_dir, fix_speed):\n a = wind_apparent_speed\n b = fix_speed * 1.94\n th = wind_apparent_dir\n # law of cosine\n spd = math.sqrt(a * a + b * b - 2 * a * b * math.cos(math.pi * th / 180))\n return spd", "def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM 
duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second", "def DragCoeff(h,Vc,Temp_m,Thrust,S):\n T,p,rho = isa(h)\n return Thrust/(0.5*rho*VTrue(h,Vc,p,Temp_m)**2*S)", "def compute_vel(self, state, goal):\n\n \"\"\"\n Unicycle model control law:\n [v;w] = [kp 0 0; 0 ka kb]*[p;a;b]\n v = commanded linear velocity of robot\n w = commanded rotational velcoity of robot\n kp = gain parameter where kp > 0\n ka = gain parameter where ka - kp > 0\n kb = gain parameter where kb < 0\n p = distance from robot to goal\n a = angle between current robot heading and heading to goal\n b = error between current heading to goal and target end heading\n \"\"\"\n \n #print('state,goal,v,w')\n #print(state)\n #print(goal)\n\n xr = state[0][0] # m in world frame\n yr = state[1][0] # m in world frame\n thetar = state[2][0] #rads\n\n xg = goal[0] # m in world frame\n yg = goal[1] # m in world frame\n\n dy = yg - yr\n dx = xg - xr\n\n #print('')\n #print(state)\n #print(goal)\n \n # Calculate a\n a = -1*thetar + math.atan2(dy,dx)\n\n #print(a)\n\n if a > math.pi:\n a = a - 2*math.pi\n\n if a < -1*math.pi:\n a = a + 2*math.pi\n\n #print(a)\n\n # Set omega according to control law\n omega = self.ka*a\n if math.fabs(omega) > self.MAX_OMEGA:\n if omega > 0:\n omega = self.MAX_OMEGA\n else:\n omega = -1*self.MAX_OMEGA\n\n # Calculate P\n p = math.sqrt(dy*dy + dx*dx)\n\n # Set v \n v = self.kp*p\n if v > self.MAX_SPEED:\n v = self.MAX_SPEED\n\n # set the done value\n done = (p <= self.done_distance)\n\n #print(v)\n #print(omega)\n\n out_tuple = (v, omega, done)\n \n return out_tuple", "def get_speed(self) -> float: \r\n if self.distance < self.distance_stop:\r\n print(\"STOP: Obstacle detected ({} cm)\".format(self.distance))\r\n return 0\r\n elif self.distance < self.distance_slow: \r\n return self.speed * 0.8\r\n else:\r\n return self.speed", "def get_dt(radius: float, mean_speed: float, drpf: float = 0.01) -> float:\n if mean_speed == 0:\n mean_speed = 1\n return drpf * radius / mean_speed\n return drpf * radius / mean_speed", "def newton(backward_differences, max_num_iters, newton_coefficient, ode_fn_vec,\n order, step_size, time, tol, unitary, upper):\n initial_guess = tf.reduce_sum(\n tf1.where(\n tf.range(MAX_ORDER + 1) <= order,\n backward_differences[:MAX_ORDER + 1],\n tf.zeros_like(backward_differences)[:MAX_ORDER + 1]),\n axis=0)\n\n np_dtype = np_dtype = dtype_util.as_numpy_dtype(backward_differences.dtype)\n\n rhs_constant_term = newton_coefficient * tf.reduce_sum(\n tf1.where(\n tf.range(1, MAX_ORDER + 1) <= order,\n RECIPROCAL_SUMS[1:, np.newaxis].astype(np_dtype) *\n backward_differences[1:MAX_ORDER + 1],\n tf.zeros_like(backward_differences)[1:MAX_ORDER + 1]),\n axis=0)\n\n next_time = time + step_size\n step_size_cast = tf.cast(step_size, backward_differences.dtype)\n real_dtype = 
tf.abs(backward_differences).dtype\n\n def newton_body(iterand):\n \"\"\"Performs one iteration of Newton's method.\"\"\"\n next_backward_difference = iterand.next_backward_difference\n next_state_vec = iterand.next_state_vec\n\n rhs = newton_coefficient * step_size_cast * ode_fn_vec(\n next_time,\n next_state_vec) - rhs_constant_term - next_backward_difference\n delta = tf.squeeze(\n tf.linalg.triangular_solve(\n upper,\n tf.matmul(tf.transpose(unitary), rhs[:, tf.newaxis]),\n lower=False))\n num_iters = iterand.num_iters + 1\n\n next_backward_difference += delta\n next_state_vec += delta\n\n delta_norm = tf.cast(tf.norm(delta), real_dtype)\n lipschitz_const = delta_norm / iterand.prev_delta_norm\n\n # Stop if method has converged.\n approx_dist_to_sol = lipschitz_const / (1. - lipschitz_const) * delta_norm\n close_to_sol = approx_dist_to_sol < tol\n delta_norm_is_zero = tf.equal(delta_norm, tf.constant(0., dtype=real_dtype))\n converged = close_to_sol | delta_norm_is_zero\n finished = converged\n\n # Stop if any of the following conditions are met:\n # (A) We have hit the maximum number of iterations.\n # (B) The method is converging too slowly.\n # (C) The method is not expected to converge.\n too_slow = lipschitz_const > 1.\n finished = finished | too_slow\n if max_num_iters is not None:\n too_many_iters = tf.equal(num_iters, max_num_iters)\n num_iters_left = max_num_iters - num_iters\n num_iters_left_cast = tf.cast(num_iters_left, real_dtype)\n wont_converge = (\n approx_dist_to_sol * lipschitz_const**num_iters_left_cast > tol)\n finished = finished | too_many_iters | wont_converge\n\n return [\n _NewtonIterand(\n converged=converged,\n finished=finished,\n next_backward_difference=next_backward_difference,\n next_state_vec=next_state_vec,\n num_iters=num_iters,\n prev_delta_norm=delta_norm)\n ]\n\n iterand = _NewtonIterand(\n converged=False,\n finished=False,\n next_backward_difference=tf.zeros_like(initial_guess),\n next_state_vec=tf.identity(initial_guess),\n num_iters=0,\n prev_delta_norm=tf.constant(np.array(-0.), dtype=real_dtype))\n [iterand] = tf.while_loop(lambda iterand: tf.logical_not(iterand.finished),\n newton_body, [iterand])\n return (iterand.converged, iterand.next_backward_difference,\n iterand.next_state_vec, iterand.num_iters)", "def dist_to_stop(speed):\n return speed ** 2 / 4", "def drag(s):\n\n r = np.linalg.norm(s[0:3])\n v_atm = we*np.array([-s[1],s[0],0]) # calculate velocity of atmosphere\n v_rel = s[3:6] - v_atm\n\n rs = Re*(1-(ee*s[2]/r)**2) # calculate radius of surface\n h = r-rs\n p = 0.6*np.exp(-(h-175)*(29.4-0.012*h)/915) # in kg/km^3\n coeff = 3.36131e-9 # in km^2/kg\n acc = -p*coeff*np.linalg.norm(v_rel)*v_rel\n\n return acc", "def setFriction(self):\n if int(self.vx) != 0:\n self.ff = -abs(self.vx)/self.vx*self.mu*abs(self.fn)\n else:\n self.ff = 0", "def test_velocity(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.velocity[0], 144000.0)", "def dynamics(state,t):\n global M,m\n f = control_upright(state)\n # f = 0\n dydx = np.zeros_like(state)\n x,x_dot,th,th_dot = state #unpacking the state\n dydx[0] = x_dot\n dydx[2] = th_dot\n\n den1 = M + (m*sin(th)*sin(th))\n dydx[1] = (f + (m*g*sin(th)*cos(th)) + m*L*th_dot*th_dot*sin(th) + (b/L)*(th_dot*cos(th)))/den1\n den2 = L*den1\n dydx[3] = (((M+m)*g*sin(th) + f*cos(th) + m*L*th_dot*th_dot*sin(th)*cos(th))/den2) 
+ (b/(m*L*L))*th_dot\n dydx[3] = -dydx[3]\n\n return dydx", "def newtonraphson(self,g_temp,var_init):\n n_step=0\n error=np.linalg.norm(self.evaluate(var_init,g_temp))\n\n while (error > 1e-12 and n_step < 50):\n #Improve solution while error is too large and the number of steps does not exceed a limit\n J_inv=np.linalg.pinv(self.jacobian(var_init,g_temp))\n var_new=var_init-np.dot(J_inv,self.evaluate(var_init,g_temp))\n error=np.linalg.norm(self.evaluate(var_new,g_temp))\n var_init=var_new\n n_step+=1\n\n return var_init", "def func_ludwigson(eps,k1,n1,k2,n2,):\n return k1*eps**n1+np.exp(k2+n2*eps)", "def excitation_force(w, z1, z2, diameter, Cm=2):\n\n config = {\n 'end1': [0, 0, z1],\n 'end2': [0, 0, z2],\n 'diameter': diameter,\n 'strip width': 1.0,\n }\n Morison_model = ViscousDragModel({\n 'inertia coefficient': Cm,\n 'members': [config],\n })\n X = Morison_model.Morison_inertial_force(w)\n return X", "def LeapFrog(self,r,v,dt):\n\n rhalf = r + np.asarray(v)*(dt/2) #Taking a half step forward with positional vector\n # predict the final velocity at the next timestep using the acceleration field at the rhalf position \n vnew = v + self.M31Accel(rhalf)*dt\n # predict the final position using the average of the current velocity and the final velocity\n rnew = r + 0.5*(v+vnew)*dt\n \n return rnew,vnew", "def __init__(self, dt=0.0001, gravity = 9.81, Damping=1000, alpha=1, mass=1000, length=1):\n self.a = alpha\n self.g = gravity\n self.m = mass\n self.l = length\n self.dt = dt\n self.Dhat = Damping/(self.m*self.l*self.a) #scaled damping constant", "def f_2(r,t):\r\n x = r[0]\r\n y = r[2]\r\n z = r[4]\r\n vx = r[1]\r\n vy = r[3]\r\n vz = r[5]\r\n # velocity equation for the ball after the bounce\r\n velocity = np.sqrt((vx+c*radius*wy)**2+(vy-c*radius*wx)**2+(-e*vz)**2)\r\n \r\n return np.array([vx, (-k_d*velocity*vx+k_l*(wy*vz-wz*vy)),\r\n vy, (-k_d*velocity*vy+k_l*(wz*vx-wx*vz)),\r\n vz,(-k_d*velocity*vz+k_l*(wz*vy-wy*vx)-g)], float)", "def test_terminalVelocity():\n import nose.tools as nt\n T = 30.; dt = 0.1; g = 9.81; m = 50.;\n Cd = 1.2; rho = 1.0; A = 0.5;\n a = Cd*rho*A/(2.*m)\n v, t = solver(T, dt, -0.1, Cd, rho, A, m)\n nt.assert_almost_equal(v[-1], -sqrt(g/a), delta=1e-4)", "def get_speed(vehicle):\n vel = vehicle.get_velocity()\n\n return 3.6 * math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)", "def force ( r, e ):\n from math import isclose\n\n # Parameters of the Gay-Berne potential \n # \n # The key parameters are \n # mu, nu ................ the exponents \n # kappa and kappa' ....... the anisotropies \n # kappa is the ratio of intermolecular separations \n # sigma_e / sigma_s i.e. end-to-end / side-by-side \n # kappa' is the ratio of well depths \n # epsilon_s / epsilon_e i.e. side-by-side / end-to-end \n # The derived parameters are chi and chi' \n # chi = (kappa**2 - 1) / (kappa**2+1) \n # chi' = (z - 1) / (z + 1)\n # where z = (kappa') ** ( 1 / mu ) \n # \n # For convenience kappa' is spelt xappa, chi' is spelt xhi\n # We choose units such that sigma_s = 1.0 and epsilon_0 = 1.0\n # Two of the following three varieties should be commented out\n\n # Original Gay-Berne-deMiguel potential [J. Chem. Phys, 74, 3316; Mol. Phys. 74, 405 (1991)]\n mu, nu, kappa, xappa = 2, 1, 3.0, 5.0\n\n # # Luckhurst-Phippen potential [Liq. Cryst., 8, 451 (1990)]\n # mu, nu, kappa, xappa = 1, 2, 3.0, 5.0\n\n # # Berardi-Zannoni potential [J. Chem. Soc. 
Faraday Trans., 89, 4069 (1993)]\n # mu, nu, kappa, xappa = 1, 3, 3.0, 5.0\n\n # Derived parameters\n chi = (kappa**2 - 1.0) / (kappa**2+1.0)\n xhi = (xappa**(1.0/mu) - 1.0) / (xappa**(1.0/mu) + 1.0)\n\n # Cutoff distance; normally we would use a larger value\n r_cut = 4.0\n \n assert r.shape == (n,3), 'Incorrect shape of r'\n assert e.shape == (n,3), 'Incorrect shape of e'\n\n # Notation to match appendix\n i = 0\n j = 1\n\n ei = e[i,:]\n ej = e[j,:]\n assert isclose(np.sum(ei**2),1.0), 'Non-unit vector {} {} {}'.format(*ei)\n assert isclose(np.sum(ej**2),1.0), 'Non-unit vector {} {} {}'.format(*ej)\n\n rij = r[i,:] - r[j,:]\n rij_mag = np.sqrt( np.sum(rij**2) ) # Magnitude of separation vector\n sij = rij / rij_mag # Unit vector\n ci = np.dot( ei, sij )\n cj = np.dot( ej, sij )\n cij = np.dot( ei, ej )\n cp = ci + cj\n cm = ci - cj\n\n # Sigma formula\n cpchi = cp/(1.0+chi*cij)\n cmchi = cm/(1.0-chi*cij)\n sigma = 1.0/np.sqrt(1.0-0.5*chi*(cp*cpchi+cm*cmchi))\n\n # Epsilon formula\n eps1 = 1.0/np.sqrt(1.0-(chi*cij)**2) # Depends on chi, not xhi\n cpxhi = cp/(1.0+xhi*cij)\n cmxhi = cm/(1.0-xhi*cij)\n eps2 = 1.0-0.5*xhi*(cp*cpxhi+cm*cmxhi) # Depends on xhi\n epsilon = (eps1**nu) * (eps2**mu)\n\n # Potential at rij\n rho = rij_mag - sigma + 1.0\n rho6 = 1.0 / rho**6\n rho12 = rho6**2\n rhoterm = 4.0*(rho12 - rho6) # Needed for forces and torques\n drhoterm = -24.0 * (2.0 * rho12 - rho6) / rho # Needed for forces and torques\n pot = epsilon*rhoterm\n\n # Potential at r_cut\n rho = r_cut - sigma + 1.0\n rho6 = 1.0 / rho**6\n rho12 = rho6**2\n cutterm = 4.0*(rho12 - rho6) # Needed for cutoff forces and torques\n dcutterm = -24.0 * (2.0 * rho12 - rho6) / rho # Needed for cutoff forces and torques\n pot = pot - epsilon * cutterm\n\n # Derivatives of sigma\n prefac = 0.5*chi*sigma**3\n dsig_dci = prefac*(cpchi+cmchi)\n dsig_dcj = prefac*(cpchi-cmchi)\n prefac = prefac*(0.5*chi)\n dsig_dcij = -prefac*(cpchi**2-cmchi**2)\n\n # Derivatives of epsilon\n prefac = -mu*xhi*(eps1**nu)*eps2**(mu-1)\n deps_dci = prefac*(cpxhi+cmxhi)\n deps_dcj = prefac*(cpxhi-cmxhi)\n prefac = prefac*(0.5*xhi)\n deps_dcij = -prefac*(cpxhi**2-cmxhi**2) # From derivative of eps2\n deps_dcij = deps_dcij + nu*(chi**2)*(eps1**(nu+2))*(eps2**mu)*cij # From derivative of eps1\n\n # Derivatives of potential\n dpot_drij = epsilon * drhoterm\n dpot_dci = rhoterm * deps_dci - epsilon * drhoterm * dsig_dci\n dpot_dcj = rhoterm * deps_dcj - epsilon * drhoterm * dsig_dcj\n dpot_dcij = rhoterm * deps_dcij - epsilon * drhoterm * dsig_dcij\n\n # Standard formula for forces and torque gradients\n fij = -dpot_drij*sij - dpot_dci*(ei-ci*sij)/rij_mag - dpot_dcj*(ej-cj*sij)/rij_mag\n gi = dpot_dci*sij + dpot_dcij*ej\n gj = dpot_dcj*sij + dpot_dcij*ei\n\n # Derivatives of potential at cutoff\n dpot_drij = epsilon * dcutterm\n dpot_dci = cutterm * deps_dci - epsilon * dcutterm * dsig_dci\n dpot_dcj = cutterm * deps_dcj - epsilon * dcutterm * dsig_dcj\n dpot_dcij = cutterm * deps_dcij - epsilon * dcutterm * dsig_dcij\n\n # Standard formula for forces and torque gradients (without dpot_drij term)\n fij = fij + dpot_dci*(ei-ci*sij)/rij_mag + dpot_dcj*(ej-cj*sij)/rij_mag\n gi = gi - ( dpot_dci*sij + dpot_dcij*ej ) \n gj = gj - ( dpot_dcj*sij + dpot_dcij*ei ) \n\n # Final forces and torques\n f = np.empty_like(r)\n t = np.empty_like(r)\n f[i,:] = fij\n f[j,:] = -fij\n t[i,:] = -np.cross(ei,gi)\n t[j,:] = -np.cross(ej,gj)\n\n return pot, f, t", "def simpson(func, start, stop):\n return (func(start) + 4*func((start+stop)/2) + func(stop)) * 
(stop-start)/6", "def lennard_jones_force(r, f_min, r_min):\n epsilon = f_min * (169 * (r_min / (2 ** (1 / 6))) / (252 * (7 / 13) ** (1 / 6) * 2 ** (5 / 6)))\n sigma = r_min / (2 ** (1 / 6))\n return 48 * epsilon * np.power(sigma, 12) / np.power(r, 13) - 24 * epsilon * np.power(sigma, 6) / np.power(r, 7)", "def FN2(lam):\n return 1.034 + 3.17 *1e-4 *lam**(-2)", "def Y_force(omega_B, V_B, m):\n t = Symbol(\"t\")\n return m * (diff(V_B[1], t) + omega_B[2] * V_B[0] - omega_B[0] * V_B[2])", "def calcDerivatives(self,Omega,U,xpos,ypos,psi):\r\n theta_w,mu_w,sigma_w = self.theta_w,self.mu_w,self.sigma_w\r\n theta_u,mu_u,sigma_u = self.theta_u,self.mu_u,self.sigma_u\r\n \r\n sigma_o = self.sigma_o\r\n dt = self.dt\r\n \r\n # Compute coupling force fc\r\n fc = sigma_o*(2*sigma_o/sigma_w)**(-U/mu_u)\r\n \r\n # Compute wall force\r\n dw = PersistentFish.findDistance(self,self.bounds,xpos,ypos,psi);\r\n# fw = 2.25*math.exp(-0.11*dw)\r\n fw = 8*math.exp(-0.11*dw)\r\n \r\n if Omega >= 0:\r\n # Repulsive behavior, depending on sign of previous turning speed \r\n # it'll push in either direction\r\n fw = -fw\r\n \r\n # Obtain random values that act as forcing terms.\r\n # randn() should work just like randn in matlab. Normally distributed about 0 mean\r\n dZ = np.random.randn()\r\n dW = np.random.randn()\r\n \r\n # Derivatives\r\n Omegadot = theta_w*(mu_w+fw-Omega)*dt + fc*dZ;\r\n Udot = theta_u*(mu_u-U)*dt + sigma_u*dW;\r\n \r\n return Omegadot, Udot, dw # return dw for detecting collision\r", "def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state[0]\n pe = state[1]\n pd = state[2]\n e0 = state[3]\n e1 = state[4]\n e2 = state[5]\n e3 = state[6]\n u = state[7]\n v = state[8]\n w = state[9]\n # state[6:10] = normalize(state[6:10])\n p = state[10]\n q = state[11]\n r = state[12]\n # extract forces/moments\n fx = forces_moments[0]\n fy = forces_moments[1]\n fz = forces_moments[2]\n l = forces_moments[3]\n m = forces_moments[4]\n n = forces_moments[5]\n\n\n # with warnings.catch_warnings():\n # warnings.filterwarnings('error')\n # try:\n # # position kinematics\n # except Warning as e:\n # pdb.set_trace()\n # print(e)\n\n pn_dot = (e1**2+e0**2-e2**2-e3**2)*u + 2*(e1*e2-e3*e0)*v + 2*(e1*e3+e2*e0)*w\n pe_dot = 2*(e1*e2+e3*e0)*u + (e2**2+e0**2-e1**2-e3**2)*v + 2*(e2*e3-e1*e0)*w\n pd_dot = 2*(e1*e3-e2*e0)*u + 2*(e2*e3+e1*e0)*v + (e3**2+e0**2-e1**2-e2**2)*w\n\n # pn_dot = (e0**2+e1**2-e2**2-e3**2)*u + 2*(e1*e2+e3*e0)*v + 2*(e1*e3-e2*e0)*w\n # pe_dot = 2*(e1*e2-e3*e0)*u + (e0**2-e1**2+e2**2-e3**2)*v + 2*(e2*e3+e1*e0)*w\n # pd_dot = 2*(e1*e3+e2*e0)*u + 2*(e2*e3-e1*e0)*v + (e0**2-e1**2-e2**2+e3**2)*w\n\n # pdb.set_trace()\n\n # position dynamics\n mass = self.mass\n u_dot = (r*v-q*w)+fx/mass\n v_dot = (p*w-r*u)+fy/mass\n w_dot = (q*u-p*v)+fz/mass\n\n # rotational kinematics\n e0_dot = 0.5*(-p*e1-q*e2-r*e3)\n e1_dot = 0.5*(p*e0+r*e2-q*e3)\n e2_dot = 0.5*(q*e0-r*e1+p*e3)\n e3_dot = 0.5*(r*e0+q*e1-p*e2)\n\n # rotatonal dynamics\n p_dot = self.gamma1*p*q - self.gamma2*q*r + self.gamma3*l + self.gamma4*n\n q_dot = self.gamma5*p*r - self.gamma6*(p**2-r**2) + m/self.Jy\n r_dot = self.gamma7*p*q - self.gamma1*q*r + self.gamma4*l + self.gamma8*n\n\n # collect the derivative of the states\n x_dot = np.array([pn_dot, pe_dot, pd_dot, e0_dot, e1_dot, e2_dot, e3_dot,\n u_dot, v_dot, w_dot, p_dot, q_dot, r_dot])\n # pdb.set_trace()\n\n\n # print(x_dot)\n return x_dot", "def Z_force(omega_B, V_B, m):\n t = Symbol(\"t\")\n return m * (diff(V_B[2], t) + omega_B[0] * V_B[1] - omega_B[1] * V_B[0])", "def 
newton(f, x0, Df, tol=1e-5, maxiter=15, alpha=1.):\n raise NotImplementedError(\"Problem 1 Incomplete\")", "def test_newton():\n\n f = lambda x: x**2 + np.sin(5*x)\n df = lambda x: 2*x + 5*np.cos(5*x)\n ddf = lambda x: 2 + 0,-25*np.sin(5*x)\n\n\n print newtonsMethod(f,df,ddf, 0, niter = 100)", "def move(self, friction = 0.0):\n try:\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n self.goto(newX, newY)\n # apply friction\n self.dx = self.dx * (1 - friction)\n self.dy = self.dy * (1 - friction)\n except:\n print(\"Error, probably because dx and dy are not properties of the turtle\")", "def zeroLJForce(sigma):\n return 2**(1/6) * sigma", "def acceleration(v,u,t):\n return ((v-u)/t)", "def second_moment(self, mass, z=None):\n return 1.0", "def dipole_moment(self, psi1=0, psi2=1):\n return dot(self.ev[psi1], self.x * self.ev[psi2])", "def do_leapfrog(self, crd, frc, vel, masses, params):\n\n # istp : current step(t)\n # crd : current step(t)'s coordinate\n # frc : current step(t)'s force\n # vel : current step(t-dt/2)'s velocity\n # crd_next : next step(t+dt)'s coordinate\n # vel_next : next step(t+dt/2))'s velocity\n # frc_next : next step(t+dt)'s force\n # vel_dt = 0.5*(vel + vel_next)\n\n # Preparation of the dynamcs on 0th step\n ms = numpy.array(masses)[:, None]\n # dt = params.dt\n # dt = params.dts\n dt = params.dt * 1000.0\n nstep = params.num_steps\n coef = self.coef\n\n # log the information of the restart file\n logger.info('*** The information of the restart file ***')\n logger.info(\" Velocity is 0+1/2-th step' informations\")\n # log temperature\n ek, temp = self.cal_temp(vel, ms)\n self.output_temp(ek, temp)\n # log energy\n if self.__setting.output.output_energy: self.__tbf.output_energy()\n # log forces\n if logger.is_debug(): self.__tbf.output_force()\n\n # do dynamics\n for istp_1 in range(nstep):\n istp = istp_1+1\n\n t0 = time.time()\n\n logger.set_curstep(istp)\n logger.info_cycle('*** ISTEP = {}, CURRENT TIME = {} ***'\n .format(istp, istp * dt))\n\n # calculate next step's velocity\n # v(t+dt/2) = v(t-dt/2) + dt * F(t) /m\n vel_next = vel + dt*frc/ms * coef\n vel_t = 0.5*( vel + vel_next )\n\n # calculate next step's coordinate\n # r(t+dt) = r(t) + dt*v(t+dt/2)\n crd_next = crd + dt*vel_next\n \n # calculate next step's forces from coordinate\n frc_next = self.cal_force(crd_next)\n\n self.store_time('integrator', time.time()-t0)\n yield istp, (crd_next, vel_next)\n\n crd = crd_next\n frc = frc_next\n vel = vel_next\n\n # log temperature\n ek, temp = self.cal_temp(vel_t, ms)\n self.output_temp(ek, temp)\n # log energy\n if self.__setting.output.output_energy: self.__tbf.output_energy()\n # log forces\n if logger.is_debug(): self.__tbf.output_force()", "def friction(self, qd):\n\n qd = getvector(qd, self.n)\n tau = np.zeros(self.n)\n\n for i in range(self.n):\n tau[i] = self.links[i].friction(qd[i])\n\n return tau", "def force_frenkel(r, epsilon, sigma, Rc, n):\n\n alpha=2*n*Rc**2*((1+2*n)/(2*n*(Rc**2-1)))**(2*n+1)\n\n Rc_term=(Rc/r)**2-1\n sigma_term=(sigma/r)**2-1\n first_term=-alpha*epsilon\n second_term=(-2*sigma**2/(r**3))*Rc_term**(2*n)\n third_term=-(Rc**2*4*n*Rc_term**(2*n-1)*sigma_term/r**3)\n\n# print \"The first term is %f, the second term is %f and the third is %f\"%(first_term, second_term, third_term)\n\n force=first_term*(second_term+third_term)\n\n return force", "def test_correct_second_derivative_center_order4(self):\r\n coeffs, shifts = finite_diff_coeffs(2, 4, \"center\")\r\n assert np.allclose(coeffs, [-2.5, 4 / 3, 4 / 3, -1 / 12, -1 / 
12])\r\n assert np.allclose(shifts, [0, -1, 1, -2, 2])", "def get_velocity(self):\n return self.momentum/self.mass", "def rhs(y, t, l, m, g):\n # Unpack the states so you can use the variable names in the\n # sympy.physics.mechanics equations\n q1 = y[0]\n q2 = y[1]\n u1 = y[2]\n u2 = y[3]\n # or you can make use of python's tuple unpacking for a one liner\n # q1, q2, u1, u2 = y\n\n # Initialize a vector for the derivatives.\n dydt = zeros((len(y)))\n\n # Compute the derivatives, these are pasted in from the\n # sympy.physics.mechanics results.\n dydt[0] = u1\n dydt[1] = u2\n dydt[2] = (-g*sin(q1)*sin(q2)**2 + 2*g*sin(q1) -\n g*sin(q2)*cos(q1)*cos(q2) + 2*l*u1**2*sin(q1)*cos(q1)*cos(q2)**2 -\n l*u1**2*sin(q1)*cos(q1) - 2*l*u1**2*sin(q2)*cos(q1)**2*cos(q2) +\n l*u1**2*sin(q2)*cos(q2) + l*u2**2*sin(q1)*cos(q2) -\n l*u2**2*sin(q2)*cos(q1))/(l*(sin(q1)**2*sin(q2)**2 +\n 2*sin(q1)*sin(q2)*cos(q1)*cos(q2) + cos(q1)**2*cos(q2)**2 - 2))\n dydt[3] = (-sin(q1)*sin(q2)/2 - cos(q1)*cos(q2)/2)*(2*g*l*m*sin(q1) -\n l**2*m*(-sin(q1)*cos(q2) +\n sin(q2)*cos(q1))*u2**2)/(l**2*m*(sin(q1)*sin(q2)/2 +\n cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2)) -\n l**2*m) + (g*l*m*sin(q2) - l**2*m*(sin(q1)*cos(q2) -\n sin(q2)*cos(q1))*u1**2)/(l**2*m*(sin(q1)*sin(q2)/2 +\n cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2))\n - l**2*m)\n\n # Return the derivatives.\n return dydt", "def delta_v_calc(mass_initial,\n mass_final,\n v_exhaust,\n ):\n\n return v_exhaust * math.log(mass_initial / mass_final)", "def f(r,t):\r\n x = r[0]\r\n y = r[2]\r\n z = r[4]\r\n vx = r[1]\r\n vy = r[3]\r\n vz = r[5]\r\n velocity = np.sqrt(vx**2+vy**2+vz**2)\r\n #if np.abs(z)>eps:\r\n velocity = np.sqrt((vx+c*radius*wy)**2+(vy-c*radius*wx)**2+(-e*vz)**2)\r\n \r\n # equations for a cricket ball in motion\r\n return np.array([vx, (-k_d*velocity*vx+k_l*(wy*vz-wz*vy)),\r\n vy, (-k_d*velocity*vy+k_l*(wz*vx-wx*vz)),\r\n vz,(-k_d*velocity*vz+k_l*(wz*vy-wy*vx)-g)], float)", "def compute_f_ColeBrook(self, R, e, D):\n # assume a starting correct value for the \"f\" on the right hand side (RHS)\n # uses friction factor from Barr's equation as the starting value.\n f_initial = self.compute_f_BARR(R, self.e, self.D)\n\n relative_roughness = e / D\n a = relative_roughness / 3.71\n b = 2.51 / (R * sqrt(f_initial))\n\n # Compute the f on the LHS ------ (1) \n f_final = 1 / (-2 * log((a + b), 10))**2\n \n # Make sure friction factor is correct to at least 6 decimal place.\n tolerance = 0.0000001\n \n # if the f on the LHS is not within tolerance limit,\n # replace it on the RHS and recompute the f on the LHS till it's within\n # tolerance limit.\n while abs(f_final - f_initial) >= tolerance:\n f_initial = f_final\n b = 2.51 / (R * sqrt(f_initial))\n f_final = 1 / (-2 * log((a + b), 10))**2\n return f_final", "def moonPass(deltaTime, duration, startingX, startingY, startingVelocityX, startingVelocityY, massRocket, rocketForce, rocketTransferDuration, moonStartAngle, manual = False):\r\n global rocketStage # initialising the global variables\r\n global massMoon\r\n \r\n moonPosX = earthMoonDistance*math.sin(math.pi*moonStartAngle/180) #m Here we have the x coordinate of the moon\r\n moonPosY = earthMoonDistance*math.cos(math.pi*moonStartAngle/180) #m Here the Y coordinate\r\n moonVelocity = math.sqrt(G*massEarth/earthMoonDistance) #the velocity of the moon as calculated by the Vis-Visa equation for a circle\r\n velocityXMoon = moonVelocity*math.cos(math.pi*moonStartAngle/180) # here we set the x and y velocity components of the moon's starting 
conditions\r\n velocityYMoon = -moonVelocity*math.sin(math.pi*moonStartAngle/180)\r\n \r\n h = deltaTime # this is the time step size, which can be altered by the user.\r\n time = [] # creating an empty array where the values of time are stored.\r\n time.append(0) #setting the first value in the time array to 0s\r\n kPos = [[0 for x in range(4)] for y in range(2)] #here we create multi-demensional arrays, 4x2, where the values for k in the runge kutta method are stored. This array is dedicated to the k's for position.\r\n kV = [[0 for x in range(4)] for y in range(2)] # same as the line above but for k's used in the velocity calculations.\r\n velocityX = [] #creating more empty arrays, for velocity, position and speed.\r\n velocityY = []\r\n posX = []\r\n posY = []\r\n speed = []\r\n \r\n velocityX.append(startingVelocityX) #here we append the first value of the array to the starting values defined by the user.\r\n velocityY.append(startingVelocityY)\r\n posX.append(startingX)\r\n posY.append(startingY)\r\n i = 0 # i is the counter used in the while loop below, to keep track of the number of iterations performed.\r\n speed.append(math.sqrt(velocityX[i]**2+velocityY[i]**2)) #here we append the starting value for the scalar value speed.\r\n \r\n moonPosXArray = [] #initialising arrays for the moon's position and velocity\r\n moonPosYArray = []\r\n velocityXMoonArray = []\r\n velocityYMoonArray = [] \r\n moonPosXArray.append(moonPosX)\r\n moonPosYArray.append(moonPosY)\r\n \r\n startingAngle = 180*math.atan(startingY/-startingX)/math.pi #calculate starting angle\r\n orbitalRadius = [] #Initialise this empty array which will hold the distace from Earth\r\n moonDistance = []\r\n totalEnergy = []\r\n angle = [] # This array will hold the anglular position of the projectile from Earth with 0 degrees pointing towards the starting position.\r\n orbitalRadius.append((math.sqrt(startingX**2+startingY**2))) #here we append the starting orbital radius at the starting position\r\n moonDistance.append((math.sqrt((startingX-moonPosX)**2+(startingY-moonPosY)**2)))\r\n angle.append(startingAngle) #here we append the starting angle\r\n totalEnergy.append(0.5*(startingVelocityX**2+startingVelocityY**2)-G*massEarth/(orbitalRadius[0])-G*massMoon/(moonDistance[0])) \r\n orbitCount = 0 # uneeded\r\n looped = True # unneeded\r\n orbitLoop = -1 # unndeeded\r\n negativeSection = 0 # In order to caculate the anglular position in 360 degrees circles, then 180 degrees needs to be added on to the trigonometric equation in certian quatiles. This variable contains the correct factor.\r\n \r\n rocketStage = 1 #initialise the stage of the projectile flight to 1\r\n \r\n while rocketStage != 6 and time[i]<duration: # while the rocket has completed all of the stages of flight and the timer hasn't run out for maximum allowed flight length.\r\n #k1s\r\n kPos[0][0] = velocityX[i] # this value is k1 for the x position. It is just the velocity of the rocket at its current position.\r\n kPos[1][0] = velocityY[i] #this value is k1 for the y position\r\n kV[0][0] = moonCalcX(posX[i], posY[i], moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce) #this value is k1 for the x velocity. 
At its current position what is the acceleration of the projectile\r\n kV[1][0] = moonCalcY(posX[i], posY[i], moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce) # this value is k1 for the y velocity\r\n \r\n #k2s\r\n kPos[0][1] = velocityX[i] + h*kV[0][0]/2 #what would its velocity be if it carried on at its initial acceleration (calculated in k1 for x velocity) for half a time step\r\n kPos[1][1] = velocityY[i] + h*kV[1][0]/2\r\n kV[0][1] = moonCalcX(posX[i] + h*kPos[0][0]/2, posY[i] + h*kPos[1][0]/2, moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce) # if it continued at the velocity in k2 for x position for half a time step what would the acceleration on the projectile be.\r\n kV[1][1] = moonCalcY(posX[i] + h*kPos[0][0]/2, posY[i] + h*kPos[1][0]/2, moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce)\r\n \r\n #k3s\r\n kPos[0][2] = velocityX[i] + h*kV[0][1]/2 # if it carried on at the acceleration calculated for k2 in x velocity for half a time step, what would its velocity be\r\n kPos[1][2] = velocityY[i] + h*kV[1][1]/2\r\n kV[0][2] = moonCalcX(posX[i] + h*kPos[0][1]/2, posY[i] + h*kPos[1][1]/2, moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce) # if carried on at the velocity calculated in k2 for half a time step then what would its accelaration be\r\n kV[1][2] = moonCalcY(posX[i] + h*kPos[0][1]/2, posY[i] + h*kPos[1][1]/2, moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce)\r\n \r\n #k4s\r\n kPos[0][3] = velocityX[i] + h*kV[0][2] # if it carried on at the acceleration calcualted in k3 fro a whole timestep, then what would its velocity be \r\n kPos[1][3] = velocityY[i] + h*kV[1][2]\r\n kV[0][3] = moonCalcX(posX[i] + h*kPos[0][2], posY[i] + h*kPos[1][2], moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce) #if it continued at the velocity calculated in k3 for a whole time step, then what would its accelaration be\r\n kV[1][3] = moonCalcY(posX[i] + h*kPos[0][2], posY[i] + h*kPos[1][2], moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce)\r\n \r\n time.append(time[i]+h) #here the new times step is appended to the time array\r\n velocityX.append(velocityX[i]+(h/6)*(kV[0][0]+2*kV[0][1]+2*kV[0][2]+kV[0][3])) # the velocity in x is appended, after combining the ks for velocity in x\r\n velocityY.append(velocityY[i]+(h/6)*(kV[1][0]+2*kV[1][1]+2*kV[1][2]+kV[1][3])) # the velocity in y is appended, after combining the ks for velocity in y\r\n posX.append(posX[i]+(h/6)*(kPos[0][0]+2*kPos[0][1]+2*kPos[0][2]+kPos[0][3])) # the x position is appended, after combinging the ks for x position\r\n posY.append(posY[i]+(h/6)*(kPos[1][0]+2*kPos[1][1]+2*kPos[1][2]+kPos[1][3])) # the y position is appended, after combinging the ks for y position\r\n speed.append(math.sqrt(velocityX[i]**2+velocityY[i]**2)) # the speed is calculated and appended, by finding the magnitude of the velocity in the x-y plane\r\n i +=1 # i is incremented by 1\r\n \r\n if manual == False: # if the moon is not stationary\r\n moonPosX, moonPosY, velocityXMoon, velocityYMoon = dynamicMoon(moonPosX, moonPosY, velocityXMoon, velocityYMoon, h) # call the function to find the position and velocity of the moon.\r\n moonPosXArray.append(moonPosX) #add the values to the relevent arrays\r\n moonPosYArray.append(moonPosY)\r\n velocityXMoonArray.append(velocityXMoon)\r\n velocityYMoonArray.append(velocityYMoon)\r\n \r\n \r\n if 
time[i-1]+h > duration and manual == True: # if we come to the end of the simulation\r\n rocketStage == 6\r\n \r\n orbitalRadius.append(math.sqrt(posX[i]**2+posY[i]**2)) # the orbital radius is calculated and appended\r\n moonDistance.append(math.sqrt((posX[i]-moonPosX)**2+(posY[i]-moonPosY)**2)) # calculating the distance to the moon\r\n totalEnergy.append(0.5*(velocityX[i]**2+velocityY[i]**2)-G*massEarth/(orbitalRadius[i])-G*massMoon/(moonDistance[i])) # the total energy at each time step is calculated by summing the gravitational potential with the kinetic \r\n if posX[i] > 0: # if the x coordinate of its position in positive (remember Earth is at (0,0))\r\n negativeSection = 1 \r\n elif posY[i] < 0: # if the y coordinate of its position is negative\r\n negativeSection = 2\r\n else:\r\n negativeSection = 0\r\n angle.append(180*math.atan(posY[i]/-posX[i])/math.pi + 180*negativeSection) # calculate and append the angle, adding on the correct factor of 180 to create the 360 degree circle\r\n \r\n if manual == True and orbitalRadius[i] <= earthRadius: # if a collision with Earth is detected\r\n rocketStage = 6 # stop the simulation\r\n print(\"Collision with Earth Detected\")\r\n \r\n if manual == True and moonDistance[i] <= moonRadius: #if a collision with the moon is detected\r\n rocketStage = 6 # stop the simulation\r\n print(\"Collision with Moon Detected\")\r\n \r\n if manual == False: # all the different stages for the automatic simulation, each different stage corresponds to the force that needs to be applied on the rocket.\r\n if i > 1: # if there has been more than one iteration of the while loop\r\n if angle[i]-angle[i-1] < 0 and rocketStage == 2: #if the angle jumps from 360 degrees to 0 which it will do on every complete circle.\r\n rocketStage = 2.5 \r\n if angle[i]-angle[i-1] < 0 and rocketStage == 4:#if the angle jumps from 360 degrees to 0 which it will do on every complete circle.\r\n rocketStage = 4.5\r\n if angle[i]-angle[i-1] < 0 and rocketStage == 5:#if the angle jumps from 360 degrees to 0 which it will do on every complete circle.\r\n rocketStage = 5.25\r\n if angle[i]-angle[i-1] < 0 and rocketStage == 5.75:#if the angle jumps from 360 degrees to 0 which it will do on every complete circle.\r\n rocketStage = 6\r\n if rocketStage == 1 and angle[i]/360 > 1-rocketTransferDuration/2: #if rocket stage equals 1 and the anle where the rockets come on has been exceeded\r\n rocketStage = 2\r\n if rocketStage == 2.5 and angle[i]/360 > rocketTransferDuration/2: #if rocket stage equals 2.5 and the anle where the rockets go off has been exceeded\r\n rocketStage = 3\r\n if rocketStage == 3 and angle[i]/360 > 0.75-rocketTransferDuration/48: #if rocket stage equals 3 and the anle where the rockets come on has been exceeded\r\n rocketStage = 4\r\n if rocketStage == 4 and angle[i]/360 > 0.75+rocketTransferDuration/48: #if rocket stage equals 4.5 and the anle where the rockets go off has been exceeded\r\n rocketStage = 5\r\n if rocketStage == 5.25 and angle[i]/360 > 0.25-rocketTransferDuration/3.4:\r\n rocketStage = 5.5\r\n if rocketStage == 5.5 and angle[i]/360 > 0.25+rocketTransferDuration/3.4:\r\n rocketStage = 5.75\r\n\r\n return posX, posY, speed, time, totalEnergy, moonPosXArray, moonPosYArray, velocityXMoonArray, velocityYMoonArray", "def newtonian_profile(PSI):\n\n U = dot(MDX, PSI)\n V = - dot(MDY, PSI)\n VGRAD = dot(U,MDX) + dot(V,MDY)\n\n BPFEQNS = zeros((3*vecLen, 3*vecLen), dtype='D')\n # Cxx eqn\n # Cxx\n BPFEQNS[0:vecLen, 0:vecLen] = Nu*MDX - VGRAD \\\n + 
2*tsm.c_prod_mat(dot(MDX,U)) - oneOverWi*II\n # Cyy\n BPFEQNS[0:vecLen, vecLen:2*vecLen] = 0\n # Cxy\n BPFEQNS[0:vecLen, 2*vecLen:3*vecLen] = 2*tsm.c_prod_mat(dot(MDY, U))\n # Cyy eqn\n # Cxx\n BPFEQNS[vecLen:2*vecLen, 0:vecLen] = 0\n # Cyy\n BPFEQNS[vecLen:2*vecLen, vecLen:2*vecLen] = Nu*MDX - VGRAD - oneOverWi*II\\\n + 2.*tsm.c_prod_mat(dot(MDY, V))\n # Cxy\n BPFEQNS[vecLen:2*vecLen, 2*vecLen:3*vecLen] = 2.*tsm.c_prod_mat(dot(MDX, V))\n #Cxy eqn\n # Cxx\n BPFEQNS[2*vecLen:3*vecLen, 0:vecLen] = tsm.c_prod_mat(dot(MDX, V))\n # Cyy \n BPFEQNS[2*vecLen:3*vecLen, vecLen:2*vecLen] = tsm.c_prod_mat(dot(MDY, U))\n # Cxy\n BPFEQNS[2*vecLen:3*vecLen, 2*vecLen:3*vecLen] = Nu*MDX - VGRAD - oneOverWi*II \n\n RHS = zeros(3*vecLen, dtype='D')\n RHS[0] = -oneOverWi\n RHS[vecLen] = -oneOverWi\n RHS[2*vecLen:3*vecLen] = 0\n\n soln = linalg.solve(BPFEQNS, RHS)\n\n Cxx = soln[0:vecLen]\n Cyy = soln[vecLen:2*vecLen]\n Cxy = soln[2*vecLen:3*vecLen]\n\n return Cxx, Cyy, Cxy", "def FrictionF(self,eta):\n return -3*3.14*eta*self.par.d*self.par.vel", "def speed(self):\n return sqrt(self.velocity_x ** 2 + self.velocity_y ** 2)", "def newton_raphson(f,x0,iterations): \n current = x0\n fdash = differentiate_polynomial(f)\n print(fdash)\n for i in range(iterations): \n current = current - evaluate_polynomial(f,current)/evaluate_polynomial(fdash,current)\n return current", "def calculate_speed(centre, prev_centre, time_step):\n if time_step != 0:\n y = centre[1] - prev_centre[1]\n x = centre[0] - prev_centre[0]\n return round(math.hypot(x, y) / (time_step * FRAME_W), 2)\n else:\n return 0", "def velocity_step(self, dt, force):\r\n self.vel += dt * force / self.mass", "def halley_newton ( fun , ## the function \n x , ## x \n deriv1 , ## the first derivative \n deriv2 = None , ## the second derivative\n fx = None , ## value of fun(x)\n args = () ) : ## additional arguments for function calls\n \n ## newton corrections\n d1 = float ( deriv1 ( x , *args ) ) \n fx = float ( fun ( x , *args ) ) if fx is None else fx \n \n if d1 : rn = fx / d1\n else : return None ## error here! 
\n \n ## make corrections: Halley's steps\n if deriv2 : \n d2 = float ( deriv2 ( x , *args ) )\n if d2 : rn /= ( 1.0 - 0.5 * rn * d2 / d1 ) ## Halley's correction \n \n return x - rn ## Newton/Halley's iteration", "def VerletHope2(r, v, beta,dt,R_dust,M_dust):\n # Deceptively simple (read about Velocity Verlet on wikipedia)\n r_new = r + v*dt + calculate_acceleration2(r,v,beta,omega,R_dust,M_dust)*dt**2/2\n v_new = v + (calculate_acceleration2(r,v,beta,omega,R_dust,M_dust) + calculate_acceleration2(r_new,v,beta,omega,R_dust,M_dust))/2 * dt\n \n return (r_new, v_new)", "def newton_method_bidirectional(f, bnd1, bnd2, Ep, step):\n\n while True:\n step = step + 1\n\n # print(\"bnd1=\",bnd1,\" and bnd2=\",bnd2)\n\n h_bnd1 = f(bnd1) / derivative(f, bnd1)\n bnd1 = bnd1 - h_bnd1\n if (decide(abs(h_bnd1) <= Ep)):\n # print(\"Root in Approximation: \",bnd1)\n return step\n\n h_bnd2 = f(bnd2) / derivative(f, bnd2)\n bnd2 = bnd2 - h_bnd2\n if (decide(abs(h_bnd2) <= Ep)):\n # print(\"Root in Approximation: \",bnd2)\n return step", "def newton_method_vector(f, x_init, epsilon = 1e-10):\n prev_value = x_init + 2 * epsilon\n value = x_init\n\n iterations = 0\n while np.all(np.abs(prev_value - value)) > epsilon:\n prev_value = value\n\n j = jacobian(f, value)\n value = value - np.dot(np.linalg.pinv(j), f(value))\n\n iterations += 1\n\n print(f\"Newton Method converged in {iterations} iterations\")\n\n return value", "def f( self , x , u , t ):\n \n dx = np.zeros(self.n) # State derivative vector\n \n ################################################\n # Place holder: put the equations of motion here\n raise NotImplementedError\n ################################################\n \n return dx", "def ODEs(y, t, B, MdiscI, RdiscI, epsilon, delta, n=1.0, alpha=0.1, cs7=1.0,\n k=0.9):\n # Initial conditions\n Mdisc, omega = y\n \n # Constants\n Rdisc = RdiscI * 1.0e5 # Disc radius - cm\n tvisc = Rdisc / (alpha * cs7 * 1.0e7) # Viscous timescale - s\n mu = 1.0e15 * B * (R ** 3.0) # Magnetic Dipole Moment\n M0 = delta * MdiscI * Msol # Global Fallback Mass Budget - g\n tfb = epsilon * tvisc # Fallback timescale - s\n \n # Radii - Alfven, Corotation, Light Cylinder\n Rm = ((mu ** (4.0 / 7.0)) * (GM ** (-1.0 / 7.0)) * ((Mdisc / tvisc) **\n (-2.0 / 7.0)))\n Rc = (GM / (omega ** 2.0)) ** (2.0 / 3.0)\n Rlc = c / omega\n # Cap the Alfven radius\n if Rm >= k * Rlc:\n Rm = k * Rlc\n \n w = (Rm / Rc) ** (3.0 / 2.0) # Fastness parameter\n \n bigT = 0.5 * I * (omega ** 2.0) # Rotational energy\n modW = (0.6 * M * (c ** 2.0) * ((GM / (R * (c ** 2.0))) / (1.0 - 0.5 * (GM\n / (R * (c ** 2.0)))))) # Binding energy\n rot_param = bigT / modW # Rotation parameter\n \n # Dipole torque\n Ndip = (-1.0 * (mu ** 2.0) * (omega ** 3.0)) / (6.0 * (c ** 3.0))\n \n # Mass flow rates and efficiencies\n eta2 = 0.5 * (1.0 + np.tanh(n * (w - 1.0)))\n eta1 = 1.0 - eta2\n Mdotprop = eta2 * (Mdisc / tvisc) # Propelled\n Mdotacc = eta1 * (Mdisc / tvisc) # Accreted\n Mdotfb = (M0 / tfb) * (((t + tfb) / tfb) ** (-5.0 / 3.0)) # Fallback rate\n Mdotdisc = Mdotfb - Mdotprop - Mdotacc # Mass flow through the disc\n \n if rot_param > 0.27:\n Nacc = 0.0 # Prevents magnetar break-up\n else:\n # Accretion torque\n if Rm >= R:\n Nacc = ((GM * Rm) ** 0.5) * (Mdotacc - Mdotprop)\n else:\n Nacc = ((GM * R) ** 0.5) * (Mdotacc - Mdotprop)\n \n omegadot = (Nacc + Ndip) / I # Angular frequency time derivative\n \n return np.array([Mdotdisc, omegadot])", "def getTOF(mass, energy, distance):\n velocity = speedOfLight * np.sqrt(2 * energy / mass)\n tof = distance / 
velocity\n return tof", "def constant_transition(\n speed, is_replay, penalty=1e-5, speed_knots=None, diagonal=None\n):\n\n if diagonal is None:\n diagonal = np.array([0.00003, 0.98])\n return partial(_constant_probability, diagonal=diagonal)", "def spread_dye(self, dt=0.1):\n #advection operator\n #This moves quite well now\n du_dt = -np.add.reduce(self.u*np.array(np.gradient(self.dye)))\n #diffusion \n du_dt += ndimage.laplace(self.dye)/100\n self.dye += du_dt*dt\n #Prevent negative density\n #self.dye = np.maximum(self.dye,0)\n self.dye *= self.dye_total/np.sum(self.dye)", "def test_solve_mde_forward_sqrt_values(\n ltisde_as_linearsde,\n ltisde_as_linearsde_sqrt_forward_implementation,\n v_const,\n diffusion,\n):\n out_linear, _ = ltisde_as_linearsde.forward_realization(\n v_const, t=0.0, dt=0.1, _diffusion=diffusion\n )\n\n out_linear_2, _ = ltisde_as_linearsde.forward_rv(\n out_linear, t=0.1, dt=0.1, _diffusion=diffusion\n )\n out_linear_2_sqrt, _ = ltisde_as_linearsde_sqrt_forward_implementation.forward_rv(\n out_linear, t=0.1, dt=0.1, _diffusion=diffusion\n )\n\n np.testing.assert_allclose(out_linear_2_sqrt.mean, out_linear_2.mean)\n np.testing.assert_allclose(out_linear_2_sqrt.cov, out_linear_2.cov)", "def g(point, contact_point, force_direction, ball_loc, t):\n # line equation = ball_loc + t*direction\n # distance to the ooi\n #distance = ( np.linalg.norm( np.cross((ball_loc[:2] - point[:2]), force_direction[:2], 0, 0) ) / \n # np.linalg.norm(force_direction[:2]))\n direction = force_direction\n force_direction = force_direction + contact_point\n print force_direction\n distance = np.linalg.norm(np.cross(point[:2] - contact_point[:2], point[:2] -\n force_direction[:2], 0 , 0)) / np.linalg.norm(abs(force_direction[:2] -\n contact_point[:2]))\n #the smaller the distance, the bigger the number\n distance = 100 / distance\n\n global accuracy_point \n accuracy_point= accuracy_point + [distance]\n\n retract_distance_x = math.sqrt(np.vdot(contact_point[0] - point[0],\n contact_point[0] - point[0]))\n retract_distance_y = math.sqrt(np.vdot(contact_point[1] - point[1],\n contact_point[1] - point[1]))\n retract_distance_z = math.sqrt(np.vdot(contact_point[2] - point[2], contact_point[2] - point[2]))\n global xy\n xy = xy + [retract_distance_x + retract_distance_y]\n global z \n z = z + [retract_distance_z * 0.3]\n\n retract_distance = 0\n # the retraction distance gets favored in the x and y directions\n retract_distance = (direction[0] * retract_distance_x +\n direction[1] *\n retract_distance_y + 0.3 * retract_distance_z)\n #force_direction[1] * retract_distance_y + force_direction[2] * retract_distance_z)\n global distance_point \n print retract_distance\n distance_point = distance_point + [np.ndarray.tolist(retract_distance)[0][0]]\n return (retract_distance, distance)", "def newton_method(f, x, Ep, step):\n\n while True:\n step = step + 1\n # print(\"bnd1:=\",bnd1)\n h = f(x) / derivative(f, x)\n x = x - h\n if (decide(abs(h) <= Ep)):\n break\n # print(\"Root in Approximation: \",bnd1)\n return step", "def newton_method(f, x_init = 0, epsilon = 1e-10):\n prev_value = x_init + 2 * epsilon\n value = x_init\n\n iterations = 0\n while abs(prev_value - value) > epsilon:\n prev_value = value\n\n f_dash = derivative(f, value)\n value = value - f(value) / f_dash\n\n iterations += 1\n\n print(f\"Newton Method converged in {iterations} iterations\")\n\n return value", "def cal_f_RK(yt, dyt, f, df, int_INV_D_pre, vw_div_vw0, fcn_D, cond_GT):\n phi_b = cond_GT['phi_bulk']\n ed = 
cond_GT['epsilon_d']\n\n y_new = yt + dyt\n f_new = f + df\n int_INV_D = int_INV_D_pre\n if df != 0.: # it is related with half-step for RK4 method\n int_INV_D += (dyt/2.)*(1./fcn_D(f, cond_GT) + 1./fcn_D(f_new, cond_GT))\n return (-1./ed)*(vw_div_vw0/fcn_D(f_new, cond_GT))*(f_new - phi_b*(1. - exp(-(vw_div_vw0/ed)*int_INV_D)))" ]
[ "0.6221095", "0.5993694", "0.59182566", "0.5850141", "0.5800747", "0.5787989", "0.56625694", "0.55726403", "0.55664736", "0.555712", "0.5540242", "0.5521722", "0.5505632", "0.54962945", "0.5486384", "0.54847544", "0.5474211", "0.5469343", "0.5457595", "0.545249", "0.54498357", "0.5445204", "0.54339385", "0.54317605", "0.54214525", "0.5419991", "0.54157037", "0.5414509", "0.54123455", "0.5404006", "0.5391163", "0.53855586", "0.53730124", "0.53700924", "0.53665596", "0.5360708", "0.5349524", "0.5349104", "0.5337225", "0.53357685", "0.53357273", "0.532213", "0.53182495", "0.53168565", "0.53127825", "0.5306844", "0.52969444", "0.5289602", "0.5289044", "0.5283494", "0.5275618", "0.5265884", "0.5264038", "0.5252595", "0.52502686", "0.5227552", "0.52260613", "0.5214371", "0.52127403", "0.52051514", "0.52050805", "0.5201602", "0.52014065", "0.5196202", "0.5195213", "0.5187836", "0.5185095", "0.518231", "0.5177318", "0.51715773", "0.51686394", "0.5165145", "0.5162114", "0.51620483", "0.51581645", "0.5158094", "0.51557165", "0.51551425", "0.5151741", "0.5150819", "0.51439816", "0.5136164", "0.5133208", "0.5132792", "0.5132215", "0.51304704", "0.5129636", "0.5126466", "0.51149446", "0.51124316", "0.5111842", "0.51088357", "0.51041746", "0.510365", "0.51008785", "0.5099537", "0.5095414", "0.5089131", "0.50868607", "0.5079238" ]
0.70825905
0
Integration function. Using scitools.StringFunction to do integration. >>> integration.py 'sin(x)' 0 pi/2
def integrate_function():
    import sys  # needed for the command-line arguments read below

    def midpoint_integration(f, a, b, n=100):
        h = (b - a)/float(n)
        I = 0
        for i in range(n):
            I += f(a + i*h + 0.5*h)
        return h*I

    f_formula = sys.argv[1]
    a = eval(sys.argv[2])
    b = eval(sys.argv[3])
    if len(sys.argv) >= 5:
        n = int(sys.argv[4])
    else:
        n = 200

    from scitools.StringFunction import StringFunction
    f = StringFunction(f_formula)    # turn formula into f(x) func.
    """
    >>> g = StringFunction('A*exp(-a*t)*sin(omega*x)',
                           independent_variable='t',
                           A=1, a=0.1, omega=pi, x=0.5)
    >>> g.set_parameters(omega=0.1)
    >>> g.set_parameters(omega=0.1, A=5, x=0)
    >>> g(0)
    0.0
    >>> g(pi)
    2.8382392288852166e-15
    """
    I = midpoint_integration(f, a, b, n)
    print("Integral of {:s} on [{:g}, {:g}] with n = {:d}: {:g}"
          .format(f_formula, a, b, n, I))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def integrate(f, inf_lim, sup_lim):\n function = get_function_from_text(f)\n return sp_integrate.quad(function, inf_lim, sup_lim)[0]", "def sin(x):\r\n # see decorator for function body\r", "def f(x):\n return (2.0*math.sin(10.0*x+1.0)+1.0)", "def a_math_function():\n return np.sin(2*np.pi)", "def sin(self, a):\n return math.sin(a)", "def f(x):\r\n\treturn np.sin(x)", "def sin(x):\n raise NotImplementedError", "def xsin(x):\n return x + tf.sin(x)", "def sin(x):\n if isinstance(x, int):\n x = Expression(x)\n return _sin(x)", "def f(x):\r\n return x * np.sin(x)", "def f(x):\n return x * np.sin(x)", "def f(x):\n return x * np.sin(x)", "def f(x):\n return x * np.sin(x)", "def f(x):\n return x * np.sin(x)", "def integrate(equ):\n if \"x\" in equ:\n return polynomial_equation(equ)\n else:\n return constant_equation(equ)", "def sin(x):\n return 0.0", "def integrate(self, t):", "def integrand(order, theta, x_eval):\n return np.cos(order*theta - x_eval*np.sin(theta))/np.pi", "def integrate(self, *args, **kwargs):\n from sympy.integrals.integrals import integrate\n return integrate(self, *args, **kwargs)", "def func(x):\n return jnp.sum(jnp.power(jnp.sin(x), 2))", "def _integrate_0_2pi_phis(self, expr):\n\n phi_s = sp.Symbol('phi_s')\n\n # replace first all odd powers of sin(phi_s) as these are\n # all zero for the integral\n replacements1 = [(sp.sin(phi_s) ** i, 0.)\n for i in range(1, self.SRF.ncoefs +\n self.V.ncoefs + 1) if i % 2 == 1]\n\n # then substitute the sine**2 by 1-cos**2\n replacements1 = (replacements1 +\n [(sp.sin(phi_s) ** i,\n expand((1. -\n sp.cos(phi_s) ** 2) ** sp.Rational(i, 2)))\n for i in range(2, self.SRF.ncoefs +\n self.V.ncoefs + 1) if i % 2 == 0])\n\n res = expand(expr.xreplace(dict(replacements1)))\n\n # replacements need to be done simultaneously, otherwise all\n # remaining sin(phi_s)**even will be replaced by 0\n\n # integrate the cosine terms\n replacements3 = [(sp.cos(phi_s) ** i, self._cosintegral(i))\n for i in range(1, self.SRF.ncoefs +\n self.V.ncoefs + 1)]\n\n res = expand(res.xreplace(dict(replacements3)))\n return res", "def integrate(self, x, dx):\n raise NotImplementedError(\"Not implemented yet.\")", "def sine(B):\n sin = math.sin\n pi = math.pi\n \n def f(x):\n return B*sin(pi*x)\n return f", "def f(x):\n\treturn np.sin(x / 5.0) * np.exp(x / 10.0) + 5 * np.exp(-x / 2.0)", "def sp_integrate_1D ( func , xmin , xmax , *args , **kwargs ) : \n from scipy import integrate\n ##\n result = integrate.quad ( func , xmin , xmax , *args , **kwargs )\n return result[0]", "def constant_equation(funct):\n return funct + \"x\"", "def function(self):\r\n lambd = 5*np.sin(2*np.pi*self.x_array) #The function in question\r\n return 3*np.pi*np.exp(-lambd)", "def sin_term(x, i):\n n = 2*i+1\n return alternate(i, exp_term(x, n))", "def integrate(self, x1, dx):\n return x1 + dx", "def process_fn(fn_string, symbols):\n fn_string = fn_string.replace('^', '**')\n fn = lambdify([sympy.symbols(symbols)], fn_string, 'numpy')\n return fn", "def integralFunction(xa, ya, xb, yb):\n return psi(xb, yb) - psi(xa, ya)", "def to_sine(x):\n res = (math.sin(math.pi * x))\n return res", "def sin(data):\n return _make.sin(data)", "def fun_exact(x):\n\n if func_type == \"sine\":\n return numpy.sin(x)\n elif func_type == \"tanh\":\n return 0.5*(1.0+numpy.tanh((x-1.0)/0.1))", "def eval(self, x):\n self.__check_input__(x)\n x1 = x[0]\n x2 = x[1]\n\n t = 1 / (8 * np.pi)\n s = 10\n r = 6\n c = 5 / np.pi\n b = 5.1 / (4 * np.pi ** 2)\n a = 1\n\n term1 = a * (x2 - b * x1 ** 2 + c * x1 - r) ** 2\n term2 
= s * (1 - t) * np.cos(x1)\n\n return term1 + term2 + s", "def sine2(x, frequency=10.0, start_phase=0.0, name=''):\n x = x.astype(np.float)\n variables = {\n 'function': sine,\n 'frequency': frequency,\n 'start_phase': start_phase\n }\n y = np.sin(2 * np.pi * frequency * x + start_phase)\n return packer(x, y, variables, name=name)", "def func1(x,u):\r\n return 5*x*u+(x+7)*np.sin(x)", "def sine(x, period=10.0, start_phase=0.0, name=''):\n x = x.astype(np.float)\n variables = {\n 'function': sine, 'period': period, 'start_phase': start_phase}\n y = np.sin(2*np.pi*(x / period) + start_phase)\n return packer(x, y, variables, name=name)", "def f4(x):\n return sin(x)/x", "def f5(x):\n return 2* sin(x) + sin(2*x)", "def F(x):\n soln = x - (1.0/5.0)*math.cos(10.0*x+1.0) \n return soln", "def f(x):\n\treturn (sc.log(x**2+5)*sc.cos(0.8*x)+3.5*x)/(sc.e**(x/10))", "def integrand(u):\n return erfcx(-u)\n #if u < -4.0:\n #return -1. / np.sqrt(np.pi) * (1.0 / u - 1.0 / (2.0 * u**3) + \n ##3.0 / (4.0 * u**5) - \n ##15.0 / (8.0 * u**7))\n #else:\n #return np.exp(u**2) * (1. + erf(u))", "def sin(x):\n\tgetcontext().prec += 2\n\t#if abs(x) > 2 * pi:\n\t\t#x = x % (2 * pi)\n\ti, lasts, s, fact, num, sign = 1, 0, x, 1, x, 1\n\twhile s != lasts:\n\t\tlasts = s\n\t\ti += 2\n\t\tfact *= i * (i-1)\n\t\tnum *= x * x\n\t\tsign *= -1\n\t\ts += num / fact * sign\n\tgetcontext().prec -= 2\n\treturn +s", "def integrate(self, x, dx):\n return self.State.integrate(x, dx)", "def integral(requestContext, seriesList):\n results = []\n for series in seriesList:\n newValues = []\n current = 0.0\n for val in series:\n if val is None:\n newValues.append(None)\n else:\n current += val\n newValues.append(current)\n newName = \"integral(%s)\" % series.name\n newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)\n newSeries.pathExpression = newName\n results.append(newSeries)\n return results", "def sinh(x):\r\n # see decorator for function body\r", "def f(x):\n \"\"\" Xrhsimopoihste MONO ekfraseis klhshs,\n p.x., stis add, mul, pow, sqrt, truediv, ...\n OXI infix telestes (+, /, ...) 
\"\"\"\n\n return round(truediv(1,add(add(2,truediv(3,add(x,4))),truediv(1,x))),4)", "def _sincfunc(x, dx, dampfac=3.25):\n if dx != 0.0:\n xx = (x+dx)*np.pi #- cache shifted array for 30% faster evals\n return np.exp( -(xx/(dampfac*np.pi))**2 ) * np.sin(xx) / xx\n else:\n xx = np.zeros(len(x))\n xx[len(x)//2] = 1.0\n return xx", "def func0(s):\n\n return s+\"tsy\"", "def scipyTranform(self,s):\n l=len(s)\n wo=2*math.pi/l\n a=2/scipy.integrate.quad(math.cos(n*wo*t),t,-inf,inf)\n b=2/scipy.integrate.quad(math.sin(n*wo*t),t,-inf,inf)", "def test_simpson():\n \n import math\n \n #test 1: with Sinus function\n h = lambda x: (3/2.)*math.sin(x)**3\n\n #Integration limits\n a = 0\n b = math.pi\n \n #Known result\n exact = 2\n \n #Run Simpson rule for h(x) over interval a to b\n test_case = Simpson(h, a, b)\n success = tol(test_case, exact)\n assert success, \"test of integral of sin^3(x) over 0 to pi.\"\n \n \n \n #test 2: with exact result using 2nd degree polynomial\n g = lambda x: 3*(x**2) - 7*x + 2.5 #2nd degree polynomial\n G = lambda x: x**3 - 3.5*(x**2) + 2.5*x #Antiderivative of g(x)\n \n\n #Integration limits\n a = 1.5\n b = 2.0\n \n #Known result\n exact = G(b) - G(a)\n\n #Run Simpson rule for g(x) over interval a to b \n test_case = Simpson(g, a, b)\n success = tol(test_case, exact)\n assert success, \"test of integral of 3x^2 - 7x + 2.5 over 1.5 to 2.\"", "def integrate(x, y, xmin, xmax):\n indexes = get_interval(x, xmin, xmax)\n integral = np.trapz(y[indexes], x[indexes])\n\n return integral", "def calculate_result(x) -> str:\n return str(math.log(abs(12*math.sin(x))))", "def convert_sin(node, **kwargs):\n return create_basic_op_node('Sin', node, kwargs)", "def test_trig_functions(self):\r\n\r\n angles = ['-pi/4', '0', 'pi/6', 'pi/5', '5*pi/4', '9*pi/4', '1 + j']\r\n sin_values = [-0.707, 0, 0.5, 0.588, -0.707, 0.707, 1.298 + 0.635j]\r\n cos_values = [0.707, 1, 0.866, 0.809, -0.707, 0.707, 0.834 - 0.989j]\r\n tan_values = [-1, 0, 0.577, 0.727, 1, 1, 0.272 + 1.084j]\r\n # Cannot test tan(pi/2) b/c pi/2 is a float and not precise...\r\n\r\n self.assert_function_values('sin', angles, sin_values)\r\n self.assert_function_values('cos', angles, cos_values)\r\n self.assert_function_values('tan', angles, tan_values)\r\n\r\n # Include those where the real part is between -pi/2 and pi/2\r\n arcsin_inputs = ['-0.707', '0', '0.5', '0.588', '1.298 + 0.635*j']\r\n arcsin_angles = [-0.785, 0, 0.524, 0.629, 1 + 1j]\r\n self.assert_function_values('arcsin', arcsin_inputs, arcsin_angles)\r\n # Rather than a complex number, numpy.arcsin gives nan\r\n self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arcsin(-1.1)')))\r\n self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arcsin(1.1)')))\r\n\r\n # Include those where the real part is between 0 and pi\r\n arccos_inputs = ['1', '0.866', '0.809', '0.834-0.989*j']\r\n arccos_angles = [0, 0.524, 0.628, 1 + 1j]\r\n self.assert_function_values('arccos', arccos_inputs, arccos_angles)\r\n self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arccos(-1.1)')))\r\n self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arccos(1.1)')))\r\n\r\n # Has the same range as arcsin\r\n arctan_inputs = ['-1', '0', '0.577', '0.727', '0.272 + 1.084*j']\r\n arctan_angles = arcsin_angles\r\n self.assert_function_values('arctan', arctan_inputs, arctan_angles)", "def sin_func(x, amplitude, frequency, phi, offset):\n return amplitude * np.sin(2*np.pi*frequency*x + phi) + offset", "def output_func(x):\n return np.cos(x) + 0.5*x", "def sinc(x):\n y = pi* where(x == 0, 1.0e-20, x)\n 
return sin(y)/y", "def test_evaluate_cos(self):\n my_module_path = \"math\"\n my_function = \"cos\"\n\n my_module = importlib.import_module(my_module_path)\n\n evaluated_function_str = \"my_module.%s\" % my_function\n\n self.assertTrue(\n evaluated_function_str == \"my_module.cos\", \"testing eval str\")\n\n evaluated_function = eval(evaluated_function_str)\n evaluated_pi = eval(\"my_module.pi\")\n self.assertTrue(evaluated_function(0.0) == 1.0,\n \"cos(0) == 1 \")\n self.assertTrue(abs(evaluated_function(evaluated_pi / 2.0)) < 0.1e-10,\n \"cos( my_module.PI / 2.0 ) == 0 \")\n\n copysign_t1 = self.eval_function(\"math\", \"copysign\", [1, 2])\n copysign_t2 = self.eval_function(\"math\", \"copysign\", [2, -3])\n copysign_t3 = self.eval_function(\"math\", \"copysign\", [-1, 2])\n copysign_t4 = self.eval_function(\"math\", \"copysign\", [2, -2])\n\n self.assertTrue(copysign_t1 == 1)\n self.assertTrue(copysign_t2 == -2)\n self.assertTrue(copysign_t3 == 1)\n self.assertTrue(copysign_t4 == -2)", "def f(x):\n return math.exp(-x**2)/(1+x**2)+(2*math.cos(x)**2)/(1+(x-4)**2)", "def eval(self, x):\n self.__check_input__(x)\n return 418.9829 * self.dim - sum([y * np.sin(np.sqrt(abs(y))) for y in x])", "def integral(\n self,\n start: XValue[T],\n stop: XValue[T],\n transform: Callable[[XValueDiff[T]], float] = lambda x: cast(float, x),\n ) -> float:\n return self.integrals(start, stop, (stop - start), transform).values()[0]", "def sfunc(self,x,y):\n return np.exp(-(x-self.x_0)**2.0-(y-self.y_0)**2.0)", "def psi(x):\n return np.sin(x)", "def integrate_fun(fun: Callable, low_b: float, upp_b: float) -> float:\n return integrate.quad(fun, low_b, upp_b)[0]", "def integral ( self, xmin , xmax , ymin , ymax , nevents = True ) :\n if self.xminmax() :\n xmn , xmx = self.xminmax()\n xmin = max ( xmin , xmn )\n xmax = min ( xmax , xmx )\n\n if self.yminmax() : \n ymn , ymx = self.yminmax() \n ymin = max ( ymin , ymn )\n ymax = min ( ymax , ymx )\n\n value , todo = 0 , True \n \n ## 1) make a try to use analytical integral (could be fast)\n if self.tricks :\n try:\n if hasattr ( self.pdf , 'setPars' ) : self.pdf.setPars() \n fun = self.pdf.function()\n value , todo = fun.integral ( xmin , xmax , ymin , ymax ) , False \n except:\n pass\n\n ## use numerical integration \n from ostap.math.integral import integral2 as _integral2\n\n extended = self.pdf.canBeExtended() or isinstance ( self.pdf , ROOT.RooAddPdf )\n\n if todo and extended : value = _integral2 ( self , xmin , xmax , ymin , ymax )\n elif todo :\n \n ## use unormalized PDF here to speed up the integration \n ifun = lambda x, y : self ( x , y , error = False , normalized = False )\n value = _integral2 ( ifun , xmin , xmax , ymin , ymax )\n norm = self.pdf.getNorm ( self.vars )\n value /= norm\n\n if nevents and self.pdf.mustBeExtended () :\n evts = self.pdf.expectedEvents( self.vars )\n if evts <= 0 or iszero ( evts ) :\n self.warning ( \"integral: expectedEvents is %s\" % evts )\n value *= evts \n\n return value", "def sin(self):\n return type(self)(self.parent(),\n self._simplify(self._express.sin()))", "def f(x):\n return np.tan(x) - np.sin(x) - (m*g)/(2*k*L)", "def integrand(x, n):\n return jn(n, x)", "def integrate_simpson(f, lower, upper, N=1000):\n a = lower # Lower integration limit\n b = upper # Upper integration limit\n w = (b - a) / N # Width of each trapezoid\n\n if N % 2 != 0:\n N += 1\n print(\"Number of slices was odd so 1 was added to N.\")\n\n I = (1 / 3) * f(a) * w + (1 / 3) * f(b) * w # Area of first and last trapezoids\n\n for i in 
range(1, N, 2): # Odd terms\n I += f(a + i * w) * w * (4 / 3)\n\n for i in range(2, N, 2): # Even terms\n I += f(a + i * w) * w * (2 / 3)\n\n return I, N", "def get_function_from_text(f):\n return lambda x: eval_expr(f, {'x': x}, numpy_dict)", "def integral(self, pars = None):\n if pars is None:\n pars = self.pars_fit\n return .5 * np.sqrt(2.*np.pi) * pars[0] * (pars[2] + pars[3])", "def integrate(f, delta):\r\n I = np.sum(f)\r\n I -= (f[0] + f[-1])/2\r\n I *= delta\r\n return I", "def f(x):\n return x**2", "def FourierSinusoids(F,w,Fs,synthesis=None):\n if synthesis==None:\n synthesis=0;\n \n Ts=1.0/Fs; \n xs=numpy.arange(0,1,Ts) \n \n signal=numpy.zeros(np.shape(xs));\n for i in range(len(F)):\n omega=2*np.pi*F[i];\n signal = signal+ w[i]*numpy.cos(-omega*xs);\n #plot the time domain signal \n subplot(2,1,1)\n plt.plot(range(0,len(signal)),signal)\n xlabel('Time')\n ylabel('Amplitude')\n title('time doman')\n \n #compute the fourier series coefficient\n r1=FourierSeries(signal)\n a1=cabs(r1)\n \n if synthesis==0:\n #plot the freuency domain signal\n L=len(a1);\n fr=np.arange(0,L);\n subplot(2,1,2)\n plt.stem(fr,a1,'r') # plotting the spectrum\n xlabel('Freq (Hz)')\n ylabel('|Y(freq)|')\n title('complete signal')\n ticks=np.arange(0,L+1,25);\n plt.xticks(ticks,ticks); \n show() \n \n if synthesis==1:\n rsignal=IFourierSeries(r1);\n print np.allclose(rsignal, signal) \n subplot(2,1,2) \n plt.stem(xs,signal)\n xlabel('Time')\n ylabel('Amplitude')\n title('reconstructed signal')\n show()", "def SI_string_to_float(inStr, debug = False):\n func_name = \"SI_string_to_float\"\n \n # Debug print incoming string. \n if debug: print(\"DEBUG: (Func = %s): Input-str: %s\" %( func_name, inStr ))\n \n #Remove all spaces from incoming string. \n inStr = inStr.replace(\" \", \"\"); \n if debug: print(\"DEBUG: (Func = %s): Removed spaces: %s\" %( func_name, inStr ))\n \n # Allocate return value, and search in\n result = None\n letters = re.search( r'([\\d\\.]+)([a-z A-Z]+)', inStr)\n \n # Query if match was found. If not, print warning then try to directly convert incoming string.\n if letters:\n try:\n value = float(letters.group(1))\n scale = float(SI_UNITS[letters.group(2)])\n result = value * scale\n if debug: print(\"DEBUG: (Func = %s): Value: %f, scale: %f, result: %f\"%(func_name, value,scale,result))\n except:\n print(\"ERROR: (Func = %s): Couldn't extract value and SI-Unit.\"%func_name)\n print(\" Possible issue with seaching 'SI_UNITS for (%s)\"% scale)\n else:\n print(\"WARNING: (Function = %s) Couldn't extract value and SI-Unit. Will attempt direct float conversion... 
\"%func_name)\n #print(\" Used the following regex: '([\\d\\.]+)([a-z A-Z]+)'\")\n result = float(inStr) # TODO : Insert try catch \n \n return result", "def _call(self, x):\n # TODO: update when integration operator is in place: issue #440\n pointwise_norm = self.pointwise_norm(x)\n return pointwise_norm.inner(pointwise_norm.space.one())", "def trig(num, op):\n if op == 1:\n return sin(num)\n elif op == 2:\n return cos(num)\n elif op == 3:\n return tan(num)\n elif op == 4:\n return 1 / cos(num)\n elif op == 5:\n return 1 / sin(num)\n elif op == 6:\n return 1 / tan(num)", "def simpson(func, start, stop):\n return (func(start) + 4*func((start+stop)/2) + func(stop)) * (stop-start)/6", "def my_fn(x):\n return 0.4*(0.5*(np.exp(x*4) - np.exp(-x*4)) - 8*x + 0.3*x**2 - 2*x**3 + 0.8)", "def integral(self, pars = None):\n if pars is None:\n pars = self.pars_fit\n return 2.*np.pi * pars[0] * pars[2] * pars[4]", "def integral(self, pars = None):\n if pars is None:\n pars = self.pars_fit\n return 2.*np.pi * pars[0] * pars[2] * pars[4]", "def sin(n, w0=1, degrees=False):\n assert type(w0) != complex, 'sin: w0 should be a real number.'\n if degrees:\n t = t * np.pi / 180\n return np.sin(w0 * t)", "def make_oscillator(frequency):\n return lambda t: math.sin(t*frequency)", "def michalewicz(x):\n return x * sin(10 * pi * x) + 1.0", "def sin(angle):\n return math.sin(math.radians(angle))", "def integrate(self):\n raise MethodImplementationError(self, 'integrate')", "def f(x):\n return N.sqrt(N.power(N.cos(x),2)+1.0)", "def _generateLambda(self, string):\n derivation = self.fieldNames.sub(r'parent.getSampleValue(stats, \"\\1\")',\n string)\n return lambda stats, parent: eval(derivation)", "def sp_integrate_1D_ ( pdf , xmin , xmax , *args , **kwargs ) :\n if hasattr ( pdf , 'setPars' ) : pdf.setPars() \n func = pdf.function()\n return func.sp_integrate_1D ( xmin , xmax , *args , **kwargs )", "def sinh(x):\n raise NotImplementedError", "def integrate(self, units=True, e_weight=0):\n int = logsimps(lambda e: e**e_weight*self(e, units=False), self.emin, self.emax, sed_config.PER_DECADE)\n return int*(u.erg**(e_weight+1)*self.units() if units else 1)", "def sin(n, w0=1, degrees=False):\n assert np.all(np.array(n, dtype=int) == n), 'sin: n should be an integer or an array of integers.'\n assert type(w0) != complex, 'sin: w0 should be a real number.'\n if degrees:\n n = n * np.pi / 180\n return np.sin(w0 * n)", "def f_sin(k):\n return k * k * k * pk(k, suppression)", "def wavefunction(self, x):\n return ( float(1) / math.pi**(float(1)/4)) * math.exp( x**2 / float(-2))", "def lambda_method(self,t): \n return 5*math.sin(2*math.pi*1*t) # I don't see the value of 1 here but this is how lamda is defined in the exercise.", "def Sin(num):\n return math.sin(float(num))", "def cosine_interpolate(self, a, b, x):\n ft = x * 3.1415927\n f = (1 - math.cos(ft)) * 0.5\n return a * (1 - f) + (b * f)", "def integral(self, pars = None):\n if pars is None:\n pars = self.pars_fit\n return np.sqrt(2.*np.pi) * pars[0] * pars[2]" ]
[ "0.63227785", "0.62122375", "0.61956286", "0.60972846", "0.6085416", "0.60581106", "0.60526955", "0.60383034", "0.6019527", "0.5990838", "0.5950136", "0.5950136", "0.5950136", "0.5950136", "0.58858174", "0.588468", "0.58532083", "0.58137757", "0.5803429", "0.5779814", "0.5709809", "0.56989974", "0.56988394", "0.5696074", "0.56615335", "0.56344116", "0.56009626", "0.55934536", "0.5590319", "0.5549469", "0.5520301", "0.5514904", "0.55077136", "0.5489866", "0.5476888", "0.54527706", "0.54235667", "0.54103327", "0.54096186", "0.5404347", "0.54017764", "0.53891414", "0.53649485", "0.5360374", "0.535133", "0.5344592", "0.53397846", "0.53307605", "0.53245103", "0.53192747", "0.53178585", "0.53051955", "0.52833724", "0.5283009", "0.52737623", "0.5256745", "0.52548057", "0.52513736", "0.5219209", "0.5216165", "0.5213227", "0.52118796", "0.521157", "0.5205483", "0.52019846", "0.5179803", "0.51505584", "0.5147485", "0.5143823", "0.51434124", "0.5133673", "0.51178664", "0.5110652", "0.5110468", "0.50754803", "0.5072889", "0.50672907", "0.50555265", "0.50520355", "0.5050193", "0.50496733", "0.5049249", "0.5049249", "0.50486773", "0.50322026", "0.5031497", "0.5014797", "0.5012092", "0.5011599", "0.5004178", "0.49976194", "0.49898922", "0.49894363", "0.49820343", "0.4975081", "0.49723157", "0.4964232", "0.49635452", "0.4957335", "0.4948001" ]
0.74676055
0
Hmmm. There should always be some common base path.
def _find_base_path(self):
    paths = [path for path, content in self._templates]
    if len(paths) == 1:
        return os.path.dirname(paths[0])
    return common_path_prefix(paths)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def base_dir(self):\n pass", "def get_base_path(self) -> str:\n raise NotImplementedError()", "def base_path(self):\n return self.setup.base_path", "def base(path1, *paths):\r\n return BASE_DIR.relpathto(path1.joinpath(*paths))", "def base():\n print(CFG.base.path)", "def base_dir(self, value):\n pass", "def base_path(self):\n return self._base_path", "def relative_base(base):\n return as_base(base).lstrip('/')", "def add_base(paths):\r\n\r\n return [os.path.join(BASEDIR, x) for x in paths]", "def set_basedir(self, host, path):", "def basepath(*args):\n return join(dirname(__file__), '../../', *args)", "def basepath():\n return os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n '..'\n )\n )", "def as_base(path):\n path = path if path.startswith('/') else '/' + path\n return path if path.endswith('/') else path + '/'", "def base_path(self):\n return Path(self.path)", "def base_dir(context):\n return '{}'.format(os.getcwd())", "def get_resource_base_path(self): # real signature unknown; restored from __doc__\n return \"\"", "def test_base_dir(self):\n self.assertEqual(self.settings.BASE_DIR, TestPredefines.BASE_DIR)", "def base_url_path(self):\n path = urlsplit(self.base_url())[2]\n if path.endswith(\"/\"):\n path = path[:-1]\n return path", "def _get_base_url(self):\n return '/{}/'.format(self.name.replace('__', '/'))", "def get_base_dir(config: Mapping[str, Any]) -> str:\n return normalize_base_dir(config.get(\"base_dir\"))", "def getuserbase():\n\tpass", "def base(self):\n return os.path.basename(self.path)", "def _base_folder(self, base_folder, obj):\n # Large portions of this code came from Products.ATContentTypes\n # TODO: a package to deal with this kind of stuff (string to object?)\n # sanitize a bit: you never know, with all those win users out there\n relPath = base_folder.replace('\\\\', '/')\n if not relPath:\n return self._portal\n if relPath[0] == '/':\n # someone didn't enter a relative path.\n # let's go with it\n path = relPath.split('/')[1:]\n else:\n folders = relPath.split('/')\n\n # set the path to the object path\n path = self._relPathToPortal(aq_parent(obj))\n\n # now construct an aboslute path based on the relative custom path\n # eat away from 'path' whenever we encounter a '..'\n # in the relative path apend all other elements other than ..\n for folder in folders:\n if folder == '..':\n # chop off one level from path\n if path == []:\n # can't chop off more\n # just return this path and leave the loop\n break\n else:\n path = path[:-1]\n elif folder == '.':\n # don't really need this but for being complete\n # strictly speaking some user may use a . 
aswell\n pass # do nothing\n else:\n path.append(folder)\n\n if not (path == []):\n # As we will traverse from portal, there is no need to\n # have its path in the way\n path = '/'.join(path)\n try:\n baseFolder = self._portal.unrestrictedTraverse(path)\n except (AttributeError, KeyError):\n baseFolder = None\n else:\n baseFolder = self._portal\n return baseFolder", "def resolve(fname):\n if os.path.dirname(__file__):\n return os.path.dirname(__file__) + \"/../common/\" + fname\n else:\n return \"/common/\" + fname", "def getBaseFolder(globalsDict=None):\n globalsDictHere = globalsDict or globals()\n baseFolder = \"\"\n if globalsDictHere['__name__'] == \"__main__\":\n baseFolder = os.path.split(sys.argv[0])[0]\n # print(('baseFolder from argv: %s'% baseFolder))\n elif globalsDictHere['__file__']:\n baseFolder = os.path.split(globalsDictHere['__file__'])[0]\n # print(('baseFolder from __file__: %s'% baseFolder))\n if not baseFolder or baseFolder == '.':\n baseFolder = os.getcwd()\n # print(('baseFolder was empty, take wd: %s'% baseFolder))\n return baseFolder", "def getBasePath(request):\n return request.rootpage.getPagePath('pages')", "def relpath(targpath: str, basepath: str='') -> str:\n pass", "def test_base_dir(self):\n old_base_dir = self.path_translator.BASE_REAL_DIR\n self.path_translator.BASE_REAL_DIR = \"/tmp/study\"\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1\".format(self.search.instance))]\n self.assertEqual(result, expected)\n self.path_translator.BASE_REAL_DIR = old_base_dir", "def base_dir(path=None, base=None, max_levels=100):\n path = path or _get_caller_path()\n path, children = _repo_dir_and_children(path, max_levels=max_levels)\n if path and base:\n # Explicit base\n return op.join(path, base)\n elif path and children:\n if children[0] in ['data', 'models', 'reports', 'src']:\n # The repo_dir IS the data science dir, so just return the repo_dir\n return path\n else:\n # Implicit base\n return op.join(path, children[0])\n else:\n # Not found\n return None", "def get_short_url_base():", "def getRootURL():", "def relative_path(base, target):\r\n common, base_tail, target_tail = split_common(base, target)\r\n #print \"common:\", common\r\n #print \"base_tail:\", base_tail\r\n #print \"target_tail:\", target_tail\r\n r = len(base_tail) * [os.pardir] + target_tail\r\n if r:\r\n return os.path.join(*r)\r\n else:\r\n return os.curdir", "def _get_default_path(self):\n\n raise NotImplementedError()", "def get_base_dir(self):\n dir_of_this_file = os.path.dirname(os.path.abspath(__file__))\n return os.path.dirname(dir_of_this_file)", "def _get_mount_point_base(self):\n raise NotImplementedError('_get_mount_point_base')", "def subdir(self):", "def buildpath(self):\n basepath = urlutil.href_settings.root + (self.relpath if self.relpath else cherrypy.request.path_info)\n if basepath.find('~') < 0:\n basepath += ('' if basepath.endswith('/') else '/') + '~'\n if cherrypy.request.query_string:\n basepath += ('&' if basepath.find('?') >= 0 else '?') + cherrypy.request.query_string\n return basepath", "def getBaseURL():\n return getQualifiedURL(getScriptname())", "def _get_base_schema_path(base_schema: str = None) -> str:\n biothings_schema_path = LOADER.filename(\"data_models/biothings.model.jsonld\")\n base_schema_path = biothings_schema_path if 
base_schema is None else base_schema\n\n return base_schema_path", "def _get_base_path_pattern(self):\n if self._base_path is not None:\n return '^%s' % re.escape(self._base_path)\n return None", "def set_base_path(self, base_path):\n self._base_path = base_path", "def test_root() -> Path:\n return TEST_ROOT", "def getRootPath():\n return '/'.join(__file__.split('/')[:-4]) # Path of this file with pagebot/__init__.py(c) removed.", "def cwd(self):", "def _abs_path(rel_path):\n return os.path.join(BASE_DIR, rel_path)", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def _base(self):\n pass", "def _set_rel_paths(self):\n if self.working_dir is not None:\n self._rel_working_dir = os.path.relpath(self.working_dir)\n if self.alignment is not None:\n self._rel_alignment = os.path.relpath(self.alignment, \n self.working_dir)\n if self.out_file is not None:\n self._rel_out_file = os.path.relpath(self.out_file, \n self.working_dir)", "def get_base_dir():\n # copied from config2.py, without the lines that check whether the\n # directory already contains a user-config.py file\n # this code duplication is nasty, should fix\n NAME = \"pywikibot\"\n for arg in sys.argv[1:]:\n if arg.startswith(\"-dir:\"):\n base_dir = arg[5:]\n sys.argv.remove(arg)\n break\n else:\n if \"PYWIKIBOT2_DIR\" in os.environ:\n base_dir = os.environ[\"PYWIKIBOT2_DIR\"]\n else:\n is_windows = sys.platform == 'win32'\n home = os.path.expanduser(\"~\")\n if is_windows:\n _win_version = int(platform.version()[0])\n if _win_version == 5:\n base_dir = os.path.join(home, \"Application Data\", NAME)\n elif _win_version == 6:\n base_dir = os.path.join(home, \"AppData\\\\Roaming\", NAME)\n else:\n base_dir = os.path.join(home, \".\"+NAME)\n if not os.path.isdir(base_dir):\n os.makedirs(base_dir, mode=0700)\n if not os.path.isabs(base_dir):\n base_dir = os.path.normpath(os.path.join(os.getcwd(), base_dir))\n return base_dir", "def get_basedir():\n pydir = os.path.dirname(os.path.realpath(__file__))\n return os.path.abspath(os.path.join(pydir, os.pardir))", "def test_get_base_url():\n eq_(get_base_url(\"http://foo.com/bar/baz\"), \"http://foo.com\")\n eq_(get_base_url(\"https://foo.com:443/foo/bar\"), \"https://foo.com:443\")", "def get_imagebase(self):\n pass", "def _get_org_base_dir(self, org_id):\n return self._get_persistent_mpe_dir().joinpath(org_id)", "def get_all_path(self, conf):\n\t\tpass", "def _get_service_base_dir(self, org_id, service_id):\n return self._get_persistent_mpe_dir().joinpath(org_id, service_id)", "def _relativize(base: str, current: str) -> str:\n if current.startswith(base):\n return current.replace(base, \"\", 1)\n return current", "def root_dir():\r\n return Path(__file__).parent.parent", "def base_uri(relative_path=''):\n base_path = get_app_root()\n if not os.path.exists(base_path):\n raise ValueError('Path %s does not exist' % base_path)\n\n return 'file://%s' % os.path.join(base_path, relative_path)", "def GetBase(self, fname, suffix):\n wds = fname.split('/')\n suff = suffix.replace('.BRIK','')\n suff = suff.replace('.HEAD','')\n if len(wds) > 1:\n return '.../%s' % '/'.join(wds[-2:]) + suff\n else:\n return fname + suff", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def becv_dir(*arg):\n return _path.join(BASE_DIR, *arg)", "def get_base_name(path):\n return os.path.basename(path).split('.')[0]", "def _fullpath(self, path):\n splitpath = path.split(self._baseurl, 2)\n if len(splitpath) == 1:\n result = 
os.path.join(self._baseurl, path)\n else:\n result = path # path contains baseurl already\n return result", "def sub_base_pod_path(self):\n return self.pod_path[len(self._base_source_path):]", "def get_http_path_prefix():\n return os.path.join(CONF.deploy.http_root, '')", "def commonpath(a, b):\r\n a = normpath(normcase(a))\r\n b = normpath(normcase(b))\r\n\r\n if a == b:\r\n return a\r\n\r\n while len(a) > 0:\r\n if a == b:\r\n return a\r\n\r\n if len(a) > len(b):\r\n a = dirname(a)\r\n else:\r\n b = dirname(b)\r\n\r\n return None", "def _get_basedir(datadir, target_genome):\n genome_dir = os.path.join(datadir, \"genomes\")\n for dirname in glob.glob(os.path.join(genome_dir, \"*\", \"*\")):\n if dirname.endswith(\"/%s\" % target_genome):\n return dirname", "def force_absolute(base, path):\n if os.path.abspath(path) and os.path.exists(path):\n return path\n else:\n return path_format(base + path)", "def find_test_base():\n\n test_base = os.path.dirname(os.path.realpath(__file__))\n\n def templates_exist(test_base):\n return os.path.exists(os.path.join(test_base, 'templates'))\n\n if not templates_exist(test_base):\n test_base = os.path.dirname(os.path.abspath(sys.argv[0]))\n if not templates_exist(test_base):\n # python 2.7 may not always be able to find the test directory at\n # all stages of test; try to rely on a provided tox ini directory if\n # running in a tox environment\n if sys.version_info < (3, 0) and 'TOX_INI_DIR' in os.environ:\n root_dir = os.environ['TOX_INI_DIR']\n test_base = os.path.join(root_dir, 'tests')\n\n if not templates_exist(test_base):\n raise RuntimeError('unable to find test base directory')\n\n return test_base", "def relative_uri(base, to):\n if to.startswith(SEP):\n return to\n b2 = base.split(SEP)\n t2 = to.split(SEP)\n # remove common segments (except the last segment)\n for x, y in zip(b2[:-1], t2[:-1]):\n if x != y:\n break\n b2.pop(0)\n t2.pop(0)\n if b2 == t2:\n # Special case: relative_uri('f/index.html','f/index.html')\n # returns '', not 'index.html'\n return ''\n if len(b2) == 1 and t2 == ['']:\n # Special case: relative_uri('f/index.html','f/') should\n # return './', not ''\n return '.' + SEP\n return ('..' 
+ SEP) * (len(b2)-1) + SEP.join(t2)", "def test_get_pyrin_root_path():\n\n root_path = os.path.abspath('.')\n assert application_services.get_pyrin_root_path() == root_path", "def make_relative(self,basepath = None):\n __make_relative__(run_object=self,basepath=basepath)", "def rosbase(fname,checkfs=True):\n\tif checkfs: assert os.path.exists(fname)\n\tif checkfs: fname = os.path.abspath(fname)\n\tmark = \"rosetta_source/src\"\n\tassert fname.find(mark) > 0\n\treturn fname[fname.find(mark)+15:]", "def relpath(long_path, base_path):\n if not hasattr(path, \"relpath\"):\n\n if not long_path.startswith(base_path):\n raise RuntimeError(\"Unexpected arguments\")\n\n if long_path == base_path:\n return \".\"\n\n i = len(base_path)\n\n if not base_path.endswith(path.sep):\n i += len(path.sep)\n\n return long_path[i:]\n else:\n return path.relpath(long_path, base_path)", "def GetBaseURL(self):\n base_url = self.server_base_url\n if base_url is None:\n base_url = 'http://%s:%s' % self.server_address[:2]\n\n return base_url", "def common_path_prefix(p1, p2):\n return common_segments(p1, p2, common_func=common_prefix)", "def getRootPath()->str:\n if '--develop' in sys.argv:\n return eel._get_real_path('public') + '/'\n\n return eel._get_real_path('build') + '/'", "def _get_reporoot():\n from os import path\n import acorn\n medpath = path.abspath(acorn.__file__)\n return path.dirname(path.dirname(medpath))", "def __init__(self):\n root_dir = os.path.dirname(os.path.abspath(__file__))\n self.base_dir = root_dir + \"/data/index/\" # base directory location for all indexes", "def base_path(self):\n return \"/repos/{}/{}\".format(self.owner, self.label)", "def tex_base_path(self, file_path):\n file_path = os.path.normpath(file_path)\n try:\n base_path = self._import_base_paths[file_path]\n except KeyError:\n base_path, _ = os.path.split(self._tex_root)\n return base_path", "def get_base_dir(self):\n return self._config_dict['output']['@baseDirectory']", "def base_dir():\n return os.path.join(TrainFile.base_dir(), 'model')", "def base_path_format(self):\n return '{}{}'.format(\n self._base_path_format,\n self.sub_base_pod_path)", "def base_dir(self):\n return self.cm.get(YAML_CONFIG_WORKING_REPO)", "def get_base_url(self):\n return self.base_url", "def get_base_url(self):\n return urlparse.urljoin(self.domain, self.root_path)", "def project_root() -> Path:\n return PROJECT_ROOT", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'visual_genome')", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "def _GuessBase(self, required):\r\n url = self._GetInfo(\"URL\")\r\n if url:\r\n scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)\r\n guess = \"\"\r\n # TODO(anatoli) - repository specific hacks should be handled by server\r\n if netloc == \"svn.python.org\" and scheme == \"svn+ssh\":\r\n path = \"projects\" + path\r\n scheme = \"http\"\r\n guess = \"Python \"\r\n elif netloc.endswith(\".googlecode.com\"):\r\n scheme = \"http\"\r\n guess = \"Google Code \"\r\n path = path + \"/\"\r\n base = urlparse.urlunparse((scheme, netloc, path, params,\r\n query, fragment))\r\n logging.info(\"Guessed %sbase = %s\", guess, base)\r\n return base\r\n if required:\r\n ErrorExit(\"Can't find URL in output from svn info\")\r\n return None", "def get_default_secrets_basedir():\n default_basedir = Path.home() / BASEDIR_BASENAME\n return Path(\n os.getenv('D2_SECRETS_BASEDIR', default_basedir)\n )", "def cwd_in_path():\n ...", "def 
base_only(self):\n return self.base", "def _get_base_endpoint_name(self):", "def get_default_paths():\n DATA_ROOT = os.environ.get(\"DATA_ROOT\", \"data\")\n defaults = {\n \"TOKENIZE_DATA_DIR\": DATA_ROOT + \"/tokenize\",\n \"MWT_DATA_DIR\": DATA_ROOT + \"/mwt\",\n \"LEMMA_DATA_DIR\": DATA_ROOT + \"/lemma\",\n \"POS_DATA_DIR\": DATA_ROOT + \"/pos\",\n \"DEPPARSE_DATA_DIR\": DATA_ROOT + \"/depparse\",\n \"ETE_DATA_DIR\": DATA_ROOT + \"/ete\",\n \"NER_DATA_DIR\": DATA_ROOT + \"/ner\",\n \"CHARLM_DATA_DIR\": DATA_ROOT + \"/charlm\",\n \"SENTIMENT_DATA_DIR\": DATA_ROOT + \"/sentiment\",\n \"CONSTITUENCY_DATA_DIR\": DATA_ROOT + \"/constituency\",\n\n # Set directories to store external word vector data\n \"WORDVEC_DIR\": \"extern_data/wordvec\",\n\n # TODO: not sure what other people actually have\n # TODO: also, could make this automatically update to the latest\n \"UDBASE\": \"extern_data/ud2/ud-treebanks-v2.11\",\n \"UDBASE_GIT\": \"extern_data/ud2/git\",\n\n \"NERBASE\": \"extern_data/ner\",\n \"CONSTITUENCY_BASE\": \"extern_data/constituency\",\n \"SENTIMENT_BASE\": \"extern_data/sentiment\",\n\n # there's a stanford github, stanfordnlp/handparsed-treebank,\n # with some data for different languages\n \"HANDPARSED_DIR\": \"extern_data/handparsed-treebank\",\n\n # directory with the contents of https://nlp.stanford.edu/projects/stanza/bio/\n # on the cluster, for example, /u/nlp/software/stanza/bio_ud\n \"BIO_UD_DIR\": \"extern_data/bio\",\n\n # data root for other general input files, such as VI_VLSP\n \"EXTERN_DIR\": \"extern_data\",\n }\n\n paths = { \"DATA_ROOT\" : DATA_ROOT }\n for k, v in defaults.items():\n paths[k] = os.environ.get(k, v)\n\n return paths", "def base_name(path):\n return os.path.basename(path)", "def test_get_cf_host_path(self):\n os.environ['ROOT_DIR'] = '/host'\n os.environ['WORKER_ROOT_DIR'] = '/worker'\n worker_path = os.path.join(os.environ['WORKER_ROOT_DIR'], 'a', 'b', 'c')\n\n self.assertEqual(file_host.rebase_to_host_root(worker_path), '/host/a/b/c')\n\n worker_path = os.environ['WORKER_ROOT_DIR']\n self.assertEqual(file_host.rebase_to_host_root(worker_path), '/host')", "def _graceful_relative_url(base_url, url):\n if url == base_url:\n return ''\n base_prefix = '%s://%s' % urlparse.urlparse(base_url or '')[0:2]\n url_prefix = '%s://%s' % urlparse.urlparse(url or '')[0:2]\n if base_prefix == url_prefix and url_prefix != '://':\n return url[len(url_prefix):]\n return url", "def get_paths():\n\n # Get repo name\n git_repo = git.Repo(__file__, search_parent_directories=True)\n repo = git_repo.git.rev_parse(\"--show-toplevel\")\n\n paths = {\"repo\": repo, \"base\":{}, \"src\":{}, \"data\":{}, \"app\":{}}\n\n for base_dir in [\"data\", \"notebooks\", \"src\", \"model\", \"logs\", \"app\"]:\n\n paths[\"base\"][base_dir] = os.path.join(repo, base_dir)\n test = paths[\"base\"][base_dir].split(base_dir)[-1]\n assert len(test) == 0\n\n for src_dir in [\"conf\", \"data\", \"notebooks\", \"tests\", \"utils\",\n \"visualize\", \"conf\", \"model\"]:\n\n src_base_dir = paths.get(\"base\").get(\"src\")\n paths[\"src\"][src_dir] = os.path.join(src_base_dir, src_dir)\n test = paths[\"src\"][src_dir].split(src_dir)[-1]\n assert len(test) == 0\n\n for data_dir in [\"raw\", \"interim\", \"processed\"]:\n\n data_base_dir = paths.get(\"base\").get(\"data\")\n paths[\"data\"][data_dir] = os.path.join(data_base_dir, data_dir)\n test = paths[\"data\"][data_dir].split(data_dir)[-1]\n assert len(test) == 0\n\n for app_dir in [\"templates\", \"static\"]:\n app_base_dir = 
paths.get(\"base\").get(\"app\")\n paths[\"app\"][app_dir] = os.path.join(app_base_dir, app_dir)\n\n return paths" ]
[ "0.75021005", "0.7379301", "0.71568936", "0.71341574", "0.70445836", "0.7044528", "0.6859555", "0.68217397", "0.6766423", "0.6734894", "0.6719331", "0.6719189", "0.67174464", "0.657178", "0.6568198", "0.6524185", "0.64243567", "0.6399701", "0.6332476", "0.63293785", "0.6321343", "0.6318737", "0.6290988", "0.62906694", "0.62569654", "0.62546265", "0.62332976", "0.6230783", "0.6229648", "0.6214237", "0.6191967", "0.61860967", "0.6184963", "0.61841965", "0.6146954", "0.6104899", "0.60990494", "0.6079303", "0.60649395", "0.6064627", "0.6063034", "0.60570097", "0.6055842", "0.60279137", "0.60178274", "0.6009485", "0.6007506", "0.5996904", "0.5992511", "0.59874415", "0.5985071", "0.5958106", "0.5957618", "0.59358996", "0.5927972", "0.5926414", "0.5922897", "0.59215677", "0.5917444", "0.5913326", "0.5904961", "0.5904749", "0.5899226", "0.58929664", "0.58923906", "0.5886389", "0.5881904", "0.5879678", "0.58759326", "0.58744633", "0.58646744", "0.5862209", "0.58594716", "0.5851214", "0.58431286", "0.58427393", "0.58179754", "0.58155435", "0.5813583", "0.5811549", "0.5810724", "0.5809783", "0.5806085", "0.5799658", "0.57720953", "0.57654536", "0.5756934", "0.57366014", "0.5735404", "0.5731321", "0.57244", "0.5718448", "0.57145494", "0.57119423", "0.57106054", "0.5710402", "0.57045245", "0.57031506", "0.57005614", "0.5697043" ]
0.7126339
4
sets attribute 'view' to false to close menu
def set_view_false(self) -> None:
    self.view = False
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close_menu():\n m = GD.gui.menu.item('Tools')\n if m :\n m.remove()", "def close_menu(self):\n self.menu.quit()", "def close(self):\n self.dismiss()\n screenmanager.change_to('main_menu')", "def OnClose(self, event):\n self.Show(False)", "def OnClose(self, event):\n\t\tself.Show(False)", "def close_UI(self):", "def return_main_menu(self, event):\n self.Destroy()", "def delete_menu():", "def close_menu(game_event):\n wire_menu.close(index_from_userid(game_event.get_int('userid')))", "def restart_menu(self):\n self.__show_menu = True", "def hide(self):\n self.visible = False", "def Command(self, id, msg):\n\n # If the user click on the \"Close\" item of the menu\n if id == self.ID_LEFT_MENU_FIRST_ITEM:\n self.Close()\n\n # If the user click on the bitmap button from the menu\n elif id == self.ID_RIGHT_MENU_SHOW_CONTENT:\n # Updates the stored value of the toggle state\n self.toogleState = not self.toogleState\n\n # Hides the element\n self.HideElement(self.ID_HIDDEN_GROUP, self.toogleState)\n\n # Notifies that the content of the parent group of the group we just hide has changed and need to be redrawn\n self.LayoutChanged(self.ID_MAIN_GROUP)\n\n return True", "def unhide(self):\n self.course.quick_action(self.id, 'show')", "def destroy(self):\r\n self.visible = False", "def hide(self):\r\n\t\tself.frame.Show(False)", "def menuExit(self, event):\n \n self.onClose(event)\n return", "def on_hide_view(self):\n self.ui_manager.unregister_handlers()", "def on_hide_view(self):\n self.ui_manager.unregister_handlers()", "def on_hide_view(self):\n self.ui_manager.unregister_handlers()", "def openOptions(self, e):\n\n\t\tself.unBind()\n\t\tself.menu_manager.runOptions()\n\t\tself.main_menu_window.root.destroy()", "def keyboard_menu_control(self, app):\n mx, my = pg.mouse.get_pos()\n click = False\n\n menu_view = self.get_view.menu_view\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n sys.exit()\n\n if event.type == pg.MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n if menu_view.menu_button.collidepoint((mx, my)):\n if click:\n app.menu_view_running = False\n\n if menu_view.quit_button.collidepoint((mx, my)):\n if click:\n pg.quit()\n sys.exit(0)", "def _set_is_open_to_false():\r\n type(self).__is_open = False\r\n password_window.destroy()", "def on_cancel_click(self):\r\n\t\t# self.parent.show()\r\n\t\tself.close()", "def hide(self, event=None):\r\n self.visible = 0\r\n self.withdraw()", "def hide(self, event=None):\r\n self.visible = 0\r\n self.withdraw()", "def hide( self, event=None ):\n self.visible = 0\n self.withdraw()", "def hide( self, event=None ):\n self.visible = 0\n self.withdraw()", "def close_menu(event: EventType, widget: WidgetType) -> bool:\n return event.key == KEY_CLOSE_MENU", "def hide(self):\n self.set_visible(False)", "def toggle(self):\n self.open = not self.open", "def OnClose(self, event):\n self._is_open = False\n wx.PostEvent(self, wxDockPaneClosedEvent())", "def close(self):\n self.parent.activate()", "def initViewMenu(self):\n menu = QMenu(QCoreApplication.translate('ViewManager', '&View'),\n self.ui)\n menu.setTearOffEnabled(True)\n menu.addActions(self.viewActGrp.actions())\n menu.addSeparator()\n menu.addActions(self.viewFoldActGrp.actions())\n menu.addSeparator()\n menu.addAction(self.previewAct)\n menu.addAction(self.astViewerAct)\n menu.addSeparator()\n menu.addAction(self.unhighlightAct)\n menu.addSeparator()\n menu.addAction(self.newDocumentViewAct)\n if self.canSplit():\n menu.addAction(self.newDocumentSplitViewAct)\n 
menu.addSeparator()\n menu.addAction(self.splitViewAct)\n menu.addAction(self.splitOrientationAct)\n menu.addAction(self.splitRemoveAct)\n menu.addAction(self.nextSplitAct)\n menu.addAction(self.prevSplitAct)\n \n return menu", "def hide(self, event=None):\n self.visible = 0\n self.withdraw()", "def OnClose(self, event = None):\n ##Close.\n self.Hide()\n self.Destroy()", "def hide (self):\n \n self.top.withdraw()", "def unHide(self):\n self.visible = True", "def show_menu(self):\n curses.curs_set(0)\n self.main_menu.display()", "def noButton(self):\n \n self.answer=\"no\"\n self.top.destroy()", "def _cleanup_nastran_tools_and_menu_items(self):\n self.nastran_tools_menu.setVisible(False)\n\n #self.menu_help.menuAction().setVisible(True)\n #self.menu_help2.menuAction().setVisible(False)\n self.nastran_toolbar.setVisible(False)\n self.actions['nastran'].setVisible(False)", "def close_menu_all(game_event):\n wire_menu.close()", "def hide(self):\n self.course.quick_action(self.id, 'hide')", "def remove_menu(menu_name):\n\n pass", "def hide(self):\n self.root.iconify() # self.root.withdraw()", "def show_menu():\n if not GD.gui.menu.item('Tools'):\n create_menu()", "def close(event):\n event.widget.destroy()", "def ocultar(self, widget, *args):\n self.show(False)\n self.ventana.hide_all()", "def close_navbar(self):\n self._close_layouting_element(\"AppBar\")", "def hide_gui():\n pass", "def filemenu_Close(self):\n\n self.on_closing()", "def close(self):\n \n return self.set_level('down')", "def show_menus(self, type_):\n if type_ == self._current:\n # do nothing\n pass\n else:\n if self._current == self.TYPE_VOIGT:\n # Plot menus are visible; hide them.\n plot_menu_labels = [menu.label for menu in self._plot_menus]\n\n for menu in self.top_level_menus:\n if menu.label in plot_menu_labels:\n self.Remove(self.FindMenu(menu.label))\n elif self._current == self.TYPE_GISO:\n # Plot menus are visible; hide them.\n plot_menu_labels = [menu.label for menu in self._plot_menus]\n\n for menu in self.top_level_menus:\n if menu.label in plot_menu_labels:\n self.Remove(self.FindMenu(menu.label))\n\n # Rebuild the view menu by deleting everything from it and then \n # reappending the appropriate items.\n while self.view_menu.GetMenuItemCount():\n #self.view_menu.DeleteItem(self.view_menu.FindItemByPosition(0))\n self.view_menu.Delete(self.view_menu.FindItemByPosition(0))\n\n _append_items(self._main, self.view_menu, self._menu_data[type_])\n\n if type_ == self.TYPE_VOIGT:\n # add plot menus\n for menu in self._plot_menus[::-1]:\n self.Insert(_PLOT_MENU_INSERT_INDEX, menu, menu.label)\n # Under wxPython 2.9, the menus I add with this call to \n # Insert() don't have their label set. I think it's a bug,\n # but I can't recreate it outside of this app. Manually\n # setting the label here is a workaround.\n self.SetMenuLabel(_PLOT_MENU_INSERT_INDEX, menu.label)\n elif type_ == self.TYPE_GISO:\n # add plot menus\n for menu in self._plot_menus[::-1]:\n self.Insert(_PLOT_MENU_INSERT_INDEX, menu, menu.label)\n # Under wxPython 2.9, the menus I add with this call to \n # Insert() don't have their label set. I think it's a bug,\n # but I can't recreate it outside of this app. 
Manually\n # setting the label here is a workaround.\n self.SetMenuLabel(_PLOT_MENU_INSERT_INDEX, menu.label)\n\n\n self._current = type_", "def landlord_button_close(self):\n return self.write({'state': 'close'})", "def hlpframeclear(self):\r\n \r\n self.menubar.entryconfig(\"File\", state = 'normal')\r\n self.menubar.entryconfig(\"Help\", state = 'normal')\r\n self.hlpframe.place_forget()", "def Close(self):\n self._is_open = False\n def closure(pane):\n if pane.IsShown():\n pane.Show(False)\n self._PaneInfoOperation(closure)", "def show_hide_model_options(n_clicks, is_open):\n if n_clicks:\n return not is_open\n return is_open", "def closeEvent(self, event):\n\n\t\tevent.ignore()\n\t\tself.hide()\n\t\tself.__sys_tray_icon.show()", "def on_cerrar_clicked(self,Button):\n main=Main.Main(self.tipo, self.id,self.nombre )\n self.set_visible(False)", "def __minimize_on_click(self):\n self.close()", "def on_ur_close_launch_btn_clicked(self):\n ur_type = self.ur.urtype\n # print(ur_type)\n close_roslaunch(ur_type)\n self.set_ur_info_txt(\"close launch \" + ur_type )\n # self.ur.Init_node(ur_type)\n self.set_ur_eepos_btns_bool(False)\n self.set_roslaunch_btn(False)\n self.set_ur_related_btns_bool(False)\n # self.ur_launch_btn()", "def close_without_saving(self, _emitter, _):\n self.settings_window.hide()\n return True", "def HideMe(self, event):\n self.Hide()", "def OnFrameClose(self, event):\r\n\t\tself.Hide()", "def hide_main_buttons(self):\n pass", "def goto_menu(self, *args):\n self.manager.current = 'Main Menu'\n self.reset()\n self.manager.reset()", "def fileMenuActions( self, action ):\n\tif (action.text() == 'Exit'):\n self.app.closeAllWindows()", "def aboutClose(self):\r\n self.tlAbout.destroy()", "def create_menu():", "def deactivate(self, newmode = None):\n\t\tself.urmaswin.Show(0) \n\t\tself.urmaswin.enableRendering(0) \n\t\tself.urmaswin.controlpanel.Show(0)\n\t\tself.visualizer.sliderWin.SetDefaultSize(self.origSliderWinSize)\n\n\t\tif not self.doLockSliderPanel and newmode != \"3d\":\n\t\t\tprint \"\\n\\n*** DEACTIVATING ANIMATOR\\n\"\n\t\t\tself.visualizer.setCurrentSliderPanel(self.visualizer.sliderPanel) \n\t\t\tself.visualizer.sliderPanel.Show(1)\n\t\tif newmode != \"3d\":\n\t\t\tself.menuManager.mainToolbar.EnableTool(MenuManager.ID_ADJUST, 1)\n\t\t\tself.menuManager.mainToolbar.EnableTool(MenuManager.ID_RESTORE, 1)\n\t\t\tself.menuManager.mainToolbar.EnableTool(MenuManager.ID_COLOCALIZATION, 1)\n\t\t\tself.menuManager.mainToolbar.EnableTool(MenuManager.ID_COLORMERGING, 1)\n\t\tself.urmaswin.cleanMenu()", "def menu():\n logout_user()\n return render_template('menu.html')", "def on_aboutMenuitem_activate(self, menuitem):\n\n response = self.aboutDialog.run()\n self.aboutDialog.hide()", "def Hide(self):\r\n \r\n return self.SetFlag(self.optionHidden, True)", "def go_back(self):\n self.hide()", "def go_back(self):\n self.hide()", "def volver_menu(self, widget, data = None):\n\t\tself.w4.hide()\n\t\tself.w41.hide()\n\t\tself.w5.hide()\n\t\tself.w51.hide()\n\t\tself.w6.hide()\n\t\tself.w7.hide()\n\t\tself.w71.hide()\n\t\tself.w8.hide()\n\t\tself.w81.hide()\n\n\t\tself.w3.show_all()", "def hide_editor(self):\r\n self.frame.Hide()", "def goBack(self):\n self.hide()", "def showWindowMenu(self, windowMenu):\n raise RuntimeError('Not implemented')", "def menuItem(*args):\n\toptionsWindow()", "def hide(self):\r\n if self.visible:\r\n nid = (self.hwnd, 0)\r\n Shell_NotifyIcon(NIM_DELETE, nid)\r\n self.visible = 0", "def setMenuMode(*args, **kwargs)->AnyStr:\n pass", "def 
view_menu_cc_activate(self, widget, data=None):\n if self.camera_control.window.get_property(\"visible\") == True:\n self.camera_control.window.hide()\n else:\n self.camera_control.window.show()", "def hide(self):\n self.window.run_command(\"hide_panel\", {\"panel\": self.full_name})", "def ensure_hidden(self):\n self.set_visible(False)", "def create_menus( self ):", "def hide(self):\n self.root.withdraw()", "def on_btn_volver(self, button):\n self.parent.show_main_menu()", "def onBtnCloseClicked(self):\n self.close()", "def on_cerrar_clicked(self, Button):\n main = Main.Main(self.tipo, self.id, self.nombre)\n self.set_visible(False)", "def addMenu():\n mb.addAction(actionAccessories)\n actionAccessories.setVisible(True)", "def set_navigation(self):\n self.close_button.controlUp(self.kvimvtv_button)\n self.kvimvtv_button.controlDown(self.close_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def back_click(self):\n self.controller.show_user_menu_screen(self.us)", "def onHelpButtonClicked(self, widget):\n self.getGtkTopObject().close()", "def __window_close(self):\n pass", "def closeEvent(self, event):\n aqt.mw.reset()\n super().closeEvent(event)", "def close(self):\n self.props_action.setVisible(False)\n self.cache.last_format = None\n self.cache.last_serial = None\n self.setWindowTitle(\"TCam Capture\")\n self.pixel_coords_label.setText(\"\")\n self.pixel_label.setText(\"\")\n self.current_fps_label.setText(\"\")\n\n if self.props:\n self.props.setParent(None)\n self.props = None\n self.removeDockWidget(self.props)\n\n self.set_device_menus_enabled(False)\n self.setCentralWidget(None)\n self.serial = None\n\n if self.props_widget:\n self.props_widget.stop()\n self.props_widget = None\n\n if self.view is not None:\n self.stop()\n self.view.setParent(None)\n self.view = None\n # update menu to remove mark on open camera\n self.update_device_list(self.device_list)", "def hide_window2(self):\n self.ui_item.Unsplit(self.splitter_window_two.ui_item)", "def shutdown(self):\n # close torn off sub menus\n for menu in self.subMenus:\n if menu.isTearOffMenuVisible():\n menu.hideTearOffMenu()", "def back_clicked(self):\n self.close()", "def toggle(self):" ]
[ "0.6946226", "0.68482953", "0.66403145", "0.6547416", "0.651479", "0.64087963", "0.6365546", "0.6273542", "0.6257272", "0.6194157", "0.6145123", "0.6080416", "0.60505515", "0.60492814", "0.60181034", "0.60166675", "0.60109", "0.60109", "0.60109", "0.6010135", "0.6009737", "0.6005847", "0.5977247", "0.5973356", "0.5973356", "0.5972764", "0.5972764", "0.59617203", "0.5959333", "0.595441", "0.5948244", "0.59297067", "0.59162635", "0.59089345", "0.58990663", "0.589159", "0.5881244", "0.587484", "0.5825288", "0.581849", "0.58160555", "0.5807381", "0.5787298", "0.57736623", "0.5752419", "0.5750829", "0.5750646", "0.57455397", "0.57442987", "0.5727385", "0.5718398", "0.57112426", "0.57047355", "0.5702975", "0.5695402", "0.5686329", "0.5662834", "0.5656859", "0.56538284", "0.56451696", "0.5635275", "0.5631446", "0.5620437", "0.5614019", "0.56138676", "0.5612008", "0.5609878", "0.5609043", "0.56025684", "0.5600719", "0.5600172", "0.56000113", "0.55979204", "0.55979204", "0.5594523", "0.5591484", "0.5590192", "0.55901384", "0.5586109", "0.5575705", "0.557361", "0.5569403", "0.5566546", "0.55607426", "0.5556776", "0.5555603", "0.55474466", "0.55451983", "0.5543074", "0.5540413", "0.5532731", "0.5532563", "0.55208826", "0.55198026", "0.55190545", "0.5515148", "0.5505653", "0.5499314", "0.54976153", "0.5497446" ]
0.61704594
10
extract each line of text from the specified text file
def load_links(self) -> Tuple[List[str], List[str]]:
    with open(URL_FILE, 'r') as txt_file:
        lines = txt_file.read().split()
    urls = []
    for line in lines:
        urls.append(line.split(',')[0])
    return lines, urls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_text_file(file_path):\n with open(file_path, 'r') as f:\n for line in f:\n line = line.rstrip()\n if not line:\n continue\n yield line", "def read_text_file(file_name):\n target_file = open(file_name)\n lines = target_file.readlines()\n\n target_file.close()\n return lines", "def _read_txt(file_path):\n translation_pairs = []\n with file_path.open() as f:\n for line in f:\n translation_pairs.append(\n evaluation.TranslationPair(source=None, translation=line.strip())\n )\n return translation_pairs", "def load_txt(filename, **kwargs):\n with sys_open(filename, 'r', **kwargs) as f:\n return f.readlines()", "def read_text_file(filename):\n try:\n file = open(filename, 'r')\n except:\n print('Cannot read file ' + filename + '. Please check the path', file=sys.stderr)\n sys.exit(1)\n output = []\n \n for line in file:\n line = line.strip().lower()\n output.append(line)\n return output", "def load_text_file(file_path: str):\n with open(file_path) as f:\n content = f.readlines()\n return content", "def read_file(filename):\r\n\r\n print(\"Reading TextFile \" + filename)\r\n text = []\r\n with open(filename, encoding=\"utf8\") as file:\r\n lines = file.readlines()\r\n for line in lines:\r\n line = line.strip()\r\n text.append(line)\r\n return text", "def extract_lines(infile):\n with open(infile, 'r') as src:\n return read_on(get_line, src)", "def read_filename(self, filename):\r\n self.text_lines = task3.read_text_file(filename)", "def parse_plain_text_export(text_file):\n\n text_file.seek(0)\n for line in text_file.readlines():\n urls = re.findall(URL_REGEX, line) if line.strip() else ()\n for url in urls:\n yield {\n 'url': url,\n 'timestamp': str(datetime.now().timestamp()),\n 'title': None,\n 'tags': '',\n 'sources': [text_file.name],\n }", "def simple_text_reader(text_file):\n with open(text_file, 'rt') as file:\n data = file.read()\n return data", "def _process(self, file: bytes) -> List[Tuple[str]]:\n decoded_text = file.decode('utf-8')\n # Replace end of line tokens\n if self.eol is not None and not self.split_by_sentence:\n decoded_text = decoded_text.replace('\\n', self.eol)\n\n # Split by sentence or unroll\n if self.split_by_sentence:\n nltk.download('punkt', quiet=True)\n text = [(sent.strip(),) for sent in nltk.tokenize.sent_tokenize(decoded_text)]\n else:\n text = [(decoded_text,)]\n\n return text", "def read_txt(cls, input_file):\n return open(input_file, \"r\", encoding=\"utf-8\").readlines()", "def read_txt(cls, input_file):\n return open(input_file, \"r\", encoding=\"utf-8\").readlines()", "def read_text(self, file_name, strip=True):\n fh = open(file_name, 'r')\n lines=[]\n for line in fh.readlines():\n if strip:\n lines.append(line.strip())\n else:\n lines.append(line)\n fh.close()\n return lines", "def read_text(filename):\n\n if '.txt' not in filename:\n raise ValueError('Input file must be a .txt file!')\n\n if filename[:4] == \"http\": # website\n website = urlopen(filename)\n # slicing to get rid of project gutenberg preamble/license\n txt = website.read().decode('UTF-8').lower()[800:-19500]\n else: # text file\n f = open(filename)\n txt = f.read().lower()\n\n # strip punctuation. 
Have switched hyphens to a capital letter and back so that they do not get removed.\n translator = txt.maketrans('--', ' ')\n txt = txt.translate(translator)\n translator = txt.maketrans('-', 'A')\n txt = txt.translate(translator)\n translator = txt.maketrans(\"\\n\\r\\t\", ' '*3)\n txt = txt.translate(translator)\n translator = txt.maketrans('', '', string.punctuation + \"'`’‘”“\")\n txt = txt.translate(translator)\n translator = txt.maketrans('A', '-')\n txt = txt.translate(translator).split(' ')\n\n return [s for s in txt if s !='']", "def load_file(file):\r\n\r\n try:\r\n with open(Path(file), \"r\", encoding=\"utf-8\", newline=\"\") as f:\r\n txt_file = f.read()\r\n except:\r\n sys.exit(\"IO_Tools: ERROR: \"+str(file)+\" not found!\")\r\n \r\n lines = txt_file.split(\"\\n\")\r\n\r\n return lines", "def read_file(self, file_name: str):\n file_text = []\n with open(file_name, encoding='utf-8', errors='ignore') as file:\n for line in file:\n line = line.strip()\n file_text.append(line)\n return file_text", "def getlistfromtext(self,filename):\n l=[]\n\n if self.encoding:\n f = codecs.open(filename,\"r\",encoding=self.encoding)\n for line in f:\n l.append(line.rstrip())\n f.close()\n\n else:\n f = open(filename,\"r\")\n for line in f:\n l.append(line.rstrip())\n f.close()\n return l", "def open_text_file(filepath):\n sentences = []\n sentencemanager = nmea.NMEASentenceManager()\n for line in open_file_generator(filepath):\n sentencemanager.process_sentence(line)\n sentences.append(line)\n return sentencemanager, sentences", "def txt_line_iterator(path):\n with tf.io.gfile.GFile(path) as f:\n for line in f:\n yield line.strip()", "def _process(self, file: bytes) -> List[Tuple[str]]:\n decoded_text = file.decode('utf-8')\n decoded_lines = decoded_text.split('\\n')\n\n # Remove titles of Wikipedia articles if desired\n if self.remove_headers:\n filtered_lines = []\n for line in decoded_lines:\n line_strip = line.strip()\n if len(line_strip) > 0:\n if line_strip[0] != '=' and line_strip[-1] != '=':\n filtered_lines.append(line)\n decoded_lines = filtered_lines\n\n eol = self.eol or ''\n if self.split_by_line:\n text = [(line.lstrip() + eol,) for line in decoded_lines]\n else:\n text = [(eol.join(decoded_lines),)]\n\n return text", "def read_file(file_path):\n\n text = ''\n with open(file_path, 'r') as file:\n for line in file.readlines():\n text += line\n return text", "def readTextFromFile(self, filename):\r\n f = open(filename)\r\n self.text = f.read()\r\n f.close()", "def readText(self, filename, firstLine = 0, lastLine = None):\n \n assert filename.endswith('.txt')\n file = open(filename, 'r')\n self.samples = []\n\n li = 0\n while li < firstLine:\n if not file.readline():\n return\n li += 1\n\n while lastLine == None or li < lastLine:\n line = file.readline()\n if not line:\n return\n li += 1\n line = line.strip()\n if line:\n columns = line.split('|')\n if columns[1] == 'client-fps':\n self.samples.append(Sample(line, columns))", "def read_txt(path):\n with open(path, \"r\") as f:\n return f.read().splitlines()", "def read_txt(path):\n with open(path, \"r\") as f:\n return f.read().splitlines()", "def readlines(filename, encoding='utf-8'):\r\n text, encoding = read(filename, encoding)\r\n return text.split(os.linesep), encoding", "def read_file_unlabeled(filename):\n\n sentences = open(filename).read().strip().split(\"\\n\\n\") #separate tweets\n ret = []\n for sent in sentences:\n lines = sent.split(\"\\n\") #each word in the tweet\n ret.append( (lines) )\n return ret", "def 
read_text_file(file_name):\n \n file_data = {}\n \n with open(file_name) as fp:\n lines = fp.readlines()\n for line in lines:\n lineno = line.strip().split(':')[0].strip()\n #here we are checking whether a particluar line in the file contains a valid data [i.e line number and content]\n try:\n content = line.strip().split(':')[1].strip()\n file_data[lineno] = content\n except IndexError:\n pass\n \n return file_data", "def process_file(file_name):\n f_in = open(file_name)\n return list(map(lambda s: s.strip(), f_in.readlines()))", "def process_file(filename, skip_header=True):\n hist = {}\n fp = file(filename)\n fullwordlist=[]\n # if skip_header:\n # skip_gutenberg_header(fp)\n\n for line in fp:\n holder=process_line(line,hist)\n #print holder\n fullwordlist.extend(holder)\n return fullwordlist", "def read_txt_file(relative_path_to_txt_file: str):\n with open(file=relative_path_to_txt_file) as f:\n lines = f.read()\n return lines", "def read_text_from_file(file_path):\n validate_txt_extension(file_path)\n with open(file_path) as file:\n # Read all the lines from the file, and concatenate into a single string.\n return ''.join([line for line in file])", "def extractText(self, filename):\n file_path = os.path.join(folder_upload, filename)\n file_text = self.textExtractor.get_text(file_path)\n return file_text", "def read_txt(path):\n \n with open(path, \"r\") as f:\n return f.read().splitlines()", "def loadText(self,textFileName):\n textFile = file(textFileName,'rb')\n reHeader = re.compile('^# ([a-zA-Z_0-9]+)')\n id,lines,changed = None,[],[]\n id_records = dict((record.id.lower(),record) for record in self.scripts)\n def unBuffer():\n record = id and id_records.get(id.lower())\n if record:\n code = (''.join(lines)).strip()\n if code.lower() != record.sctx.data.strip().lower():\n record.setCode(code)\n changed.append(id)\n for line in textFile:\n maHeader = reHeader.match(line)\n if maHeader:\n unBuffer()\n id,lines = maHeader.group(1),[]\n elif id: \n lines.append(line)\n textFile.close()\n unBuffer()\n return sorted(changed,key=string.lower)", "def parse_text(text=None, file=None):\n if not text:\n text = open(file).readlines()\n parsed_text = re.split(ARTICLE_TOKEN, text)\n return parsed_text", "def getFileContent(fileName):\n with open(fileName, \"r\") as targets:\n lines = targets.readlines()\n i = 0\n #Remove the \\n\n for line in lines:\n lines[i] = line.rstrip()\n i += 1\n return lines", "def extract_sentences(file_path):\n\n with open(file_path, \"r\") as file:\n\n lines = list()\n\n for line in file:\n line_stripped = line.strip()\n\n if line_stripped == \"\":\n continue\n\n lines.append(line_stripped)\n\n text = \" \".join(lines)\n sentences = token_to_sentence(text)\n\n return sentences", "def parse(filename):\n with open(filename) as file:\n lines = [line.strip() for line in file]\n return lines", "def processText(text):\n print(type(text))\n for line in text:\n print(line)\n return text", "def kitti_readlines(filename):\n with open(filename, 'r') as f:\n lines = f.read().splitlines()\n return lines", "def read_lines(filename):\n with file(filename) as f:\n for line in f:\n _line = line.strip()\n if _line:\n yield _line", "def read_txt(filename):\n file_object = open(filename, 'r')\n file_as_string = file_object.read()\n return create_word_list(file_as_string)", "def process_raw_phrases(file_path):", "def get_file_lines(filename):\n\n with open(filename, \"r\") as lines:\n lines = lines.readlines() # Saves list of each poem line in lines\n\n for _ in range(len(lines)):\n 
lines[_] = lines[_].rstrip() # Removes newline char from right-side end of each poem line\n\n return lines", "def get_list(file_name):\n with open(file_name, \"r\", encoding=\"latin-1\") as file:\n text = file.read()\n text = text.lower() # Make everything lowercase\n text = text.split(\"\\n\")\n return text", "def get_lines(file_name):\n phrase_dict = {}\n file_text = file_name.readlines()\n for line_index, line in enumerate(file_text):\n if line.startswith('msgid '):\n line = line[7:-2]\n if line:\n phrase_dict[line_index] = line\n return phrase_dict, file_text", "def load_lines(filename):\r\n lines = []\r\n f = open(filename)\r\n for line in f.readlines():\r\n line = line.strip()\r\n lines.append(line)\r\n return lines", "def process_f(filename, order=3):\n\n fp = open(filename) # open file\n\n for line in fp: # for each line of text in file: do something\n for word in line.rstrip().split(): # for each word in each line: strip \\r and split into tuple\n process_word(word, order) # for tuple, call process word()\n fp.close() # close file", "def read_processed_data_from_file(file, encoding='latin1'):\n\n with open(file, encoding=encoding) as f:\n raw = f.read()\n\n lines = raw.split('\\n')\n labeled_texts = []\n n = len(lines) - 1\n for i, line in enumerate(lines):\n print(f'\\rLoading review {i} of {n}', end='')\n if line == '':\n continue\n tagged_words = re.findall(r'(.+?\\\\.+?) ', line)\n label = re.findall(r'#(\\d+.\\d)#', line)[0]\n labeled_texts.append((tagged_words, label))\n print()\n return labeled_texts", "def process_textfile(inf):\n list_of_urls_to_check = [line.rstrip() for line in inf.readlines()]\n return list_of_urls_to_check", "def loadText(self,textFileName):\n #--Text File\n infoKey = None\n text = None\n texts = {}\n reHeader = re.compile('^#')\n reInfo = re.compile('@ +(\\d) +\"(.+?)\" +(\\d+)')\n reSingleQuote = re.compile('[\\x91\\x92]')\n reDoubleQuote = re.compile('[\\x93\\x94]')\n reEllipsis = re.compile('\\x85')\n reEolSpaces = re.compile(r' +\\r\\n')\n reExtraSpaces = re.compile(r' +')\n reIllegalChars = re.compile(r'[@#]')\n #--Read file\n textFile = file(textFileName,'rb')\n for line in textFile:\n if reHeader.match(line): continue\n maInfo = reInfo.match(line)\n if maInfo:\n infoKey = (int(maInfo.group(1)),maInfo.group(2),maInfo.group(3))\n texts[infoKey] = text = []\n else:\n text.append(line)\n textFile.close()\n #--Strip and clean texts\n updated = []\n unmatched = []\n trimmed = {}\n for infoKey in texts.keys():\n if infoKey not in self.infos:\n unmatched.append(infoKey)\n continue\n text = ''.join(texts[infoKey])\n #--Required Subs\n text = text.strip(' \\r\\n')\n text = reSingleQuote.sub('\\'',text)\n text = reDoubleQuote.sub('\"',text)\n text = reEllipsis.sub('...',text)\n text = reIllegalChars.sub('',text)\n #--Optional subs\n text = reEolSpaces.sub('\\r\\n',text)\n text = reExtraSpaces.sub(' ',text)\n #--Trim?\n if len(text) > 511:\n trimmed[infoKey] = (text[:511],text[511:])\n text = text[:511]\n info = self.infos[infoKey]\n if text != info.text:\n info.text = text\n info.setChanged()\n updated.append(infoKey)\n #--Report\n buff = cStringIO.StringIO()\n for header,infoKeys in ((_('Updated'),updated),(_('Unmatched'),unmatched)):\n if infoKeys:\n buff.write('=== %s\\n' % (header,))\n for infoKey in infoKeys:\n buff.write('* %s\\n' % (infoKey,))\n if trimmed:\n buff.write('=== %s\\n' % (_('Trimmed'),))\n for infoKey,(preTrim,postTrim) in trimmed.items():\n buff.write(`infoKey`+'\\n'+preTrim+'<<<'+postTrim+'\\n\\n')\n return buff.getvalue()", "def 
read_file_in_lines(filename):\r\n\twith open(filename) as infile:\r\n\t\tlines = infile.readlines()\r\n\treturn [line.strip() for line in lines]", "def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n\n ### split off metadata\n content = all_text.split(\"X-FileName:\")\n words = \"\"\n if len(content) > 1:\n ### remove punctuation\n text_string = content[1].translate(str.maketrans(\"\", \"\", string.punctuation))\n\n ### split the text string into individual words\n words = text_string.split()\n\n return words", "def load_text(filename):\n\n return \" \".join(list(\n map(\n lambda word: word.strip(), open(filename))))", "def process_data(file_object: TextIO) -> list:\n text_list = [line.replace('\\n', '').split(' ') for line in file_object]\n return text_list", "def read_file(file):\n text = []\n with open(file, newline='') as f:\n reader = csv.reader(f)\n next(reader, None) # skip header row\n for row in reader:\n text.append(row)\n return text", "def _get_transcript_entries(transcript_directory):\n transcript_files = iglob_recursive(transcript_directory, '*.trans.txt')\n for transcript_file in transcript_files:\n with open(transcript_file, 'r') as f:\n for line in f:\n # Strip included new line symbol\n line = line.rstrip('\\n')\n\n # Each line is in the form\n # 00-000000-0000 WORD1 WORD2 ...\n splitted = line.split(' ', 1)\n yield splitted", "def split_txt_file(filename: str) -> List[str]:\n if not filename.endswith('.txt'):\n raise ValueError('File extension must be txt')\n\n with open(filename, encoding='cp1251') as txt_file:\n return drop_empty_lines(txt_file.readlines())", "def extract_text(fname):\n\n laparams = pdfminer.layout.LAParams()\n for param in ('all_texts', 'detect_vertical', 'word_margin', 'char_margin', 'line_margin', 'boxes_flow'):\n paramv = locals().get(param, None)\n if paramv is not None:\n setattr(laparams, param, paramv)\n\n # send output to a string stream\n outfp = io.StringIO()\n\n with open(fname, 'rb') as fp:\n pdfminer.high_level.extract_text_to_fp(fp, outfp=outfp, codec='utf-8',\n laparams=laparams, pages=0)\n\n return outfp.getvalue()", "def data_extract(self, file):\n\n file_data = [row.strip().split() for row in open('data/{}'.format(file)).readlines()]\n return file_data", "def _lines(filename):\n \n handle = gzip.open(filename, 'rt') if _gz(filename) else open(filename)\n for line in handle:\n if not line.startswith('#'):\n yield line.strip().split('\\t')", "def loadText(self,filePath):\n ins = file(filePath,'r')\n reComment = re.compile(r\"#.*\")\n reSection = re.compile(r'@ +(srcmod|replace)',re.M)\n reReplace = re.compile(r\"(\\w[-\\w ']+)\\s*:\\s*(.+)\")\n reNewIds = re.compile(r\",\\s*\")\n mode = None\n for line in ins:\n line = reComment.sub('',line.strip())\n maSection = reSection.match(line)\n if maSection:\n mode = maSection.group(1)\n elif not line: #--Empty/comment line\n pass\n elif mode == 'srcmod':\n self.srcModName = line\n elif mode == 'replace':\n maReplace = reReplace.match(line)\n if not maReplace: continue\n oldId = maReplace.group(1)\n self.newIds[oldId.lower()] = reNewIds.split(maReplace.group(2))\n ins.close()", "def read_txt_by_lines(path):\n\n with open(path, \"r\") as f:\n buffer = f.read().splitlines()\n f.close()\n return buffer", "def open_and_read_file(file_path):\n\n text_file = open(file_path)\n full_text = text_file.read()\n\n return full_text", "def read_lines_from_file(filename):\n with open(filename) as f:\n content = f.readlines()\n\n content = [x.strip() for x 
in content]\n return content", "def get_lines_from_file(fname, context=None):\n content = []\n if context and context.ddboost:\n contents = get_lines_from_dd_file(fname, context.ddboost_storage_unit)\n return contents\n else:\n with open(fname) as fd:\n for line in fd:\n content.append(line.strip('\\n'))\n return content", "def _read_data(self, txtfile):\n data_string = open(txtfile,'r').read()\n return data_string", "def read_list(file_name):\n with open(file_name, 'r') as f:\n text = f.read().splitlines()\n return text", "def get_text_list_from_raw_txt_file(data_root=\"MP4_download\"):\n data_root = pathlib.Path(data_root)\n all_txt_data_paths = [str(path) for path in\n list(data_root.glob('*/*/*.txt'))] # [MP4_download/360VR/89422838/89422838.txt,...]\n text_list = []\n for text_data_path in all_txt_data_paths:\n description_information_dict = eval(open(text_data_path).read())\n txt_brief = description_information_dict['mp4_txt_brief']\n text_list.append(txt_brief)\n return text_list", "def get_file_text(file_name):\n\tf = open(file_name, 'r')\n\ttext = f.read()\n\treturn text", "def readfile(filename):\n try:\n with open(filename, \"r\") as file:\n text = file.readlines()\n for i in range(len(text)):\n text[i] = text[i].rstrip()\n return text\n except:\n print(\"Error readfile()\")", "def getText(filename):\n\n infile = open(filename, 'r')\n text = infile.read()\n infile.close()\n\n return text", "def read_txt(txtfile):\n with open(txtfile, \"r\", encoding=\"utf8\") as infile: \n text = infile.read()\n #print(text[0:100])\n return text", "def read_file_lines(afile):\n with open(afile, 'r') as f:\n lines = f.read()\n return lines.splitlines()", "def load_from_txt(path):\n with open(path) as file:\n data = [line.rstrip() for line in file]\n return data", "def get_text_from_file(filepath):\n with open(filepath, 'r') as f:\n return f.read()", "def read_lines(filename, verbose=True):\n with open(filename, 'r') as fp:\n lines = fp.readlines()\n if verbose:\n print(\"Done reading file\", filename)\n \n return [line.strip() for line in lines]", "def read_text_file(self, filepath: str):\n with open(filepath) as fh:\n for line in fh:\n for word in re.split('\\W+', line):\n word = word.lower()\n if len(word):\n l = self.hash_map.lookup(word)\n self.hash_map.insert(word, l + 1 if l > 0 else 1)", "def _get_line_strings_from_xml_file(filename: str) -> List[str]:\n xml_root_element = ElementTree.parse(filename).getroot() # nosec\n xml_line_elements = xml_root_element.findall(\"handwritten-part/line\")\n return [el.attrib[\"text\"].replace(\"&quot;\", '\"') for el in xml_line_elements]", "def lines(text):\n return [l.strip() for l in text.strip().splitlines() if l.strip()]", "def extract(self, filename, tu):\r\n it = tu.get_tokens(extent=tu.get_extent(filename, (0, int(os.stat(filename).st_size))))\r\n\r\n while True:\r\n try:\r\n self.extract_loop(it)\r\n except StopIteration:\r\n break", "def file_reader(filePath):\n try:\n word_file = open(filePath, \"rt\")\n word_list = word_file.read().splitlines()\n word_file.close()\n return word_list\n except Exception:\n print(f\"An error has occured when reading the file.\")\n\n return", "def parse_file(self, file_path) -> list:\n data = []\n with open(file_path, 'rb') as f:\n lines = pickle.load(f)\n for line in lines:\n input, output = line\n if input.strip() == \"\" or output.strip() == \"\":\n continue\n input_len = len(input.split())\n output_len = len(output.split())\n if input_len > 50 or output_len > 50:\n continue\n data_item = 
Text2TextDataItem(input_text=input, output_text=output, tokenizer=self.tokenizer,\n share_vocab=self.share_vocab)\n data.append(data_item)\n return data", "def get_file_contents(file_name):\n\n\tf = open(file_name)\n\tlines = f.readlines()\n\tf.close()\n\treturn lines", "def import_text(file):\n\n # Only use alpha-numeric words from file\n with open(file=file, mode='r') as text:\n word_list = [word for word in text.read().split() if word.isalnum()]\n return word_list", "def get_text(filename):\n with open(filename, 'r', encoding='utf-8') as file:\n file_text = file.read()\n return file_text", "def read_file(tp, ignoreincludes):\n ret = []\n filename, f = tp\n\n accumulate = \"\"\n for lineno, line in enumerate(f):\n lineno = lineno + 1 # number from 1\n line = line.strip()\n if not line: # preserve blanks\n ret.append((line, filename, lineno))\n continue\n if line.endswith(\"\\\\\"):\n accumulate += line[0:-1]\n continue\n elif accumulate:\n line = accumulate + line\n accumulate = \"\"\n\n if line:\n line = apply_macros(line)\n\n line = line.strip()\n\n if not line:\n continue\n\n try:\n if line.startswith(\"<\") and line.endswith(\">\"):\n if line.startswith(\"<include\"):\n if not ignoreincludes:\n line = line[1:-1]\n line = line[7:].strip()\n line = line.strip('\"')\n ret.extend(read_file(\n searching_open(line),\n ignoreincludes))\n else:\n ret.append((line, filename, lineno))\n elif line.startswith(\"<transform\"):\n line = line[1:-1]\n add_transform(line, filename, lineno)\n else:\n raise RuntimeError(_(\"unknown command {0}\").format(\n line))\n else:\n ret.append((line, filename, lineno))\n except RuntimeError as e:\n error(_(\"File {file}, line {line:d}: {exception}\").format(\n file=filename,\n line=lineno,\n exception=e),\n exitcode=None)\n raise RuntimeError(\"<included from>\")\n\n return ret", "def parse_txt_file(txtfile):\n array = np.genfromtxt(txtfile)\n return array", "def read_file(filename):\n\n sentences = open(filename).read().strip().split(\"\\n\\n\") #separate tweets\n ret = []\n for sent in sentences:\n lines = sent.split(\"\\n\") #each word in the tweet\n pairs = [L.split(\"\\t\") for L in lines] #Funniest O\n tokens = [tok for tok,tag in pairs]\n tags = [tag for tok,tag in pairs]\n ret.append( (tokens,tags) )\n return ret", "def get_text_lines(instText):\n\n # Find out which part this is\n part = instText.part\n # Get the necessary parameters: lng, ext, dir\n sLng = part.corpus.get_lng_display()\n sDir = part.dir\n sName = instText.fileName\n sFormat = instText.get_format_display()\n # Now try to get the information\n oBack = get_crpp_text(sLng, sDir, sFormat, sName)\n # Prepare what we return\n if oBack == None or oBack['status'] == 'error':\n return None\n else:\n return oBack", "def _gen_txt_data(self, f):\n\t\treader = iter(f)\n\n\t\tfor line_num, line in enumerate(reader):\n\t\t\tif line_num == 0 and self.has_header:\n\t\t\t\tcontinue\n\n\t\t\tdatum = line.rstrip('\\r\\n')\n\n\t\t\tyield datum, line_num+1", "def parse_file(self, file_lines):\n # separate the file into chunks of text\n chunks, chunk = [], []\n # check to see what format the corpus is in, we assume that the headers are the same for all\n # texts in the file... 
(maybe not safe?)\n if re.match('Primary publication:', file_lines[0]):\n header = re.compile('Primary publication:')\n else:\n header = re.compile(r'&?P\\d{6}')\n for line in file_lines:\n if header.match(line):\n if len(chunk) > 0: # pylint: disable=len-as-condition\n chunks.append(chunk)\n chunk = [line]\n else:\n if len(line) > 0: # pylint: disable=len-as-condition\n chunk.append(line)\n chunks.append(chunk)\n self.chunks = chunks\n # create a rich catalog from the chunks\n re_translit = re.compile(r'(\\d+\\'?\\.) ?(.*)')\n re_normaliz = re.compile(r'(#tr\\.ts:) ?(.*)')\n re_translat = re.compile(r'(#tr\\.en:) ?(.*)')\n for chunk in self.chunks:\n text = chunk\n if chunk[0].startswith('Primary publication:'):\n # we've got full metadata, add additional parsing later\n metadata = chunk[:25]\n text = chunk[26:]\n else: # no metadata\n metadata = []\n pnum = ''.join([c for c in text[0].split('=')[0] if c != '&']).rstrip()\n edition = text[0].split('=')[1].lstrip()\n text = text[3:]\n translit = []\n normaliz = []\n translat = []\n for line in text:\n if re.match(r'\\d+\\'?\\.', line):\n translit.append(re_translit.match(line).groups()[1])\n if line.startswith('#tr.ts:'):\n normaliz.append(re_normaliz.match(line).groups()[1])\n if line.startswith('#tr.en:'):\n translat.append(re_translat.match(line).groups()[1])\n self.catalog[pnum] = {'metadata': metadata,\n 'pnum': pnum,\n 'edition': edition,\n 'raw_text': text,\n 'transliteration': translit,\n 'normalization': normaliz,\n 'translation': translat}", "def loadTextFiles(path):\n\n data = []\n \n for filename in os.listdir(path):\n f=open(path+filename, 'r')\n content = f.read()\n # clean special characters and append\n data.append(re.sub('\\W+',' ', content))\n\n return data", "def get_plain_text_reader(encoding: str = \"utf-8\"):\n def reader(files: List[str]) -> Iterable[List[str]]:\n for path in files:\n\n if path.endswith(\".gz\"):\n with gzip.open(path, 'r') as f_data:\n for line in f_data:\n yield str(line, 'utf-8').strip().split(\" \")\n else:\n with open(path, encoding=encoding) as f_data:\n for line in f_data:\n yield line.strip().split(\" \")\n\n return reader", "def process_to_text(rawfile, txtfile, field: int=None):\n\n if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:\n sacrelogger.info(\"Processing %s to %s\", rawfile, txtfile)\n if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\\\1', line)), file=fout)\n elif rawfile.endswith('.xml'): # IWSLT\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\\\1', line)), file=fout)\n elif rawfile.endswith('.txt'): # wmt17/ms\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip(), file=fout)\n elif rawfile.endswith('.tsv'): # MTNT\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip().split('\\t')[field], file=fout)", "def readText(fileName):\n fileText = \"\"\n with open(fileName,\"r\") as fileObject:\n fileText = fileObject.read()\n \n return fileText", "def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n ### split off metadata\n \n content = re.split(\"X-FileName:.*$\", all_text, flags=re.MULTILINE, maxsplit=1)\n 
words = \"\"\n if len(content) > 1:\n text_string = content[1]\n\n ## remove mails that are forwarded or to which are responded\n # e.g. ---------------------- Forwarded\"\n text_string = re.split(\"-*\\sForwarded\", text_string, maxsplit=1)[0]\n\n # -----Original Message-----\n text_string = re.split(\"-*\\Original\\sMessage\", text_string, maxsplit=1)[0]\n\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # To:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n # or\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # to:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n \n text_string = re.split(\"((.*\\n){2})[Tt]o:\\s\", text_string, maxsplit=1)[0]\n\n ### remove punctuation\n # should be autopmatically by scikit learn\n #text_string = text_string.translate(string.maketrans(\"\", \"\"), string.punctuation)\n\n ### project part 2: comment out the line below\n #words = text_string\n\n ### split the text string into individual words, stem each word,\n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n from nltk.stem.snowball import SnowballStemmer\n\n stemmer = SnowballStemmer(\"english\")\n words = [stemmer.stem(word) for word in text_string.split()]\n\n\n\n return \" \".join(words)", "def loadTextFile(self):\n if self.tempFilePath is None or not MyFile.checkFileExists(self.tempFilePath):\n raise Exception(\"Temporary text file does not exist!\")\n\n io = Ioread()\n self.sentencesList = io.readFileContentList(self.tempFilePath)" ]
[ "0.722885", "0.7041475", "0.69846565", "0.6924913", "0.69187284", "0.6857259", "0.68436337", "0.6830081", "0.6812712", "0.68080425", "0.6784779", "0.6754171", "0.6750703", "0.6750703", "0.6634743", "0.6614622", "0.66041446", "0.65703064", "0.65577686", "0.6539482", "0.651655", "0.64905196", "0.6483792", "0.644066", "0.64263165", "0.6421013", "0.6421013", "0.6386781", "0.6378689", "0.6348989", "0.63375527", "0.63257587", "0.632481", "0.63245815", "0.6321023", "0.6319081", "0.62844306", "0.62794185", "0.62777615", "0.6274281", "0.6254687", "0.62536913", "0.62439406", "0.6233798", "0.62190175", "0.6215833", "0.6214939", "0.617896", "0.616457", "0.6160833", "0.6155133", "0.61541295", "0.6142822", "0.6142743", "0.6132537", "0.61253333", "0.6118418", "0.61136466", "0.6111657", "0.6071049", "0.6069902", "0.6068891", "0.60647374", "0.60642123", "0.60569876", "0.60481584", "0.6047565", "0.6044301", "0.60373574", "0.60315245", "0.6023626", "0.6023411", "0.60149723", "0.60042685", "0.5998475", "0.59971786", "0.5996765", "0.5994861", "0.59948355", "0.59828013", "0.5975978", "0.5969727", "0.5969367", "0.5962522", "0.59546167", "0.59537804", "0.59481424", "0.59465224", "0.5942818", "0.59275234", "0.5924394", "0.59216017", "0.592027", "0.5907232", "0.59046966", "0.5904455", "0.589628", "0.5895044", "0.5892751", "0.58913606", "0.5884476" ]
0.0
-1
prints out index, url link
def view_registry(self) -> None:
    arr = self.load_links()[0]
    for i,v in enumerate(arr):
        print(f"<{i}: {v}>\n")
    pass
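A small sketch of the output format produced by the loop above, using hypothetical registry entries (the real URL_FILE contents are not part of this row):

# hypothetical entries standing in for self.load_links()[0]
arr = ["https://example.com,Example", "https://docs.python.org,Docs"]
for i, v in enumerate(arr):
    print(f"<{i}: {v}>\n")
# prints "<0: https://example.com,Example>" then "<1: https://docs.python.org,Docs>",
# each followed by a blank line (the explicit \n plus print's own newline)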
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printLinks(self):\n self.browsed.sort()\n sys.stderr.write(\"\\n+ \" + _(\"URLs\") + \":\\n\")\n for lien in self.browsed:\n print(lien)", "def index():\n g.data['api_version'] = API_VERSION\n g.data['apilib_version'] = API_VERSION\n g.data['oar_version'] = VERSION\n g.data['links'] = []\n #endpoints = ('resources', 'jobs', 'config', 'admission_rules')\n endpoints = ('resources', 'jobs')\n for endpoint in endpoints:\n g.data['links'].append({\n 'rel': 'collection',\n 'href': url_for('%s.index' % endpoint),\n 'title': endpoint,\n })", "def make_link_to(self, index, caption):\n \n # index is an int\n return '<a href=\"/log/'+str(index)+'\"> '+caption+' '+str(index)+'</a>'", "def index():\n return render_template('index.html', name=urlpath)", "def index():\n return (\n f\"Welcome to my Hawaii trip info!<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def index():\n pass", "def index():\n return 'There is nothing here.'", "def url(index):\n return ALEXA_MAP[index]", "def index():\n isoko = get_sources('general')\n\n head = \"ALLOVER THE WORLD NEWS\"\n return render_template('index.html', title = head, isoko = isoko)", "def index():\n #Getting source\n sources = get_sources()\n print(sources)\n return render_template('index.html', sources = sources)", "def home_page():\n return \"<h4>Welcome !</h4><br><a href='/fetch'>View Results</a>\"", "def print(self):\n self._print_title_and_url(self.index, self.title, self.url)\n self._print_metadata_and_abstract(self.abstract, metadata=self.metadata)", "def test_index():\n result = views.index(testing.DummyResource(), testing.DummyRequest())\n\n # Pyramid's host url defaults to http://example.com\n host = 'http://example.com'\n links = result['links']\n assert links['annotation']['create']['method'] == 'POST'\n assert links['annotation']['create']['url'] == host + '/annotations'\n assert links['annotation']['delete']['method'] == 'DELETE'\n assert links['annotation']['delete']['url'] == host + '/annotations/:id'\n assert links['annotation']['read']['method'] == 'GET'\n assert links['annotation']['read']['url'] == host + '/annotations/:id'\n assert links['annotation']['update']['method'] == 'PUT'\n assert links['annotation']['update']['url'] == host + '/annotations/:id'\n assert links['search']['method'] == 'GET'\n assert links['search']['url'] == host + '/search'", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"<a href='/api/v1.0/precipitation'>Precipitation</a><br/>\"\n f\"<a href='/api/v1.0/stations'>Stations</a><br/>\"\n f\"<a href='/api/v1.0/tobs'>Temperature</a><br/>\"\n f\"<a href='/api/v1.0/start'>Start Date</a><br/>\"\n f\"<a href='/api/v1.0/start/end'>End Date</a><br/>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"<a href='/api/v1.0/precipitation'>precipitation</a><br/>\"\n f\"<a href='/api/v1.0/stations'>stations</a><br/>\"\n f\"<a href='/api/v1.0/tobs'>tobs</a><br/>\"\n f\"<a href='/api/v1.0/tobs/start_date'>tobs/start_date</a><br/>\"\n f\"<a href='/api/v1.0/tobs/start_date/end_date'>tobs/start_date/end_date</a><br/>\"\n )", "def print_indices(self):\n # Putting the param in the endpoint here because why not\n endpoint = \"/_cat/indices?v\"\n url = self.base_url + endpoint\n r = requests.get(url, headers=self.headers, verify=False)\n r.raise_for_status()\n print(r.text)\n return", "def home():\n return (\n f\"Available Routes:<br/>\"\n 
f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs\"\n )", "def home():\n return (\n f\"These are the available routes:</br>\"\n f\"/api/v1.0/precipitation</br>\"\n f\"/api/v1.0/stations</br>\"\n f\"/api/v1.0/tobs</br>\"\n f\"/api/v1.0/< start ></br>\"\n f\"/api/v1.0/< start >/< end ></br>\"\n )", "def index():\n\n return {\n 'page': 'index',\n }", "def get(self):\n self.response.out.write(\"There's nothing to see here. How 'bout a \"\n \"<a href='/'>puzzle</a>?\")", "def index():\n return render_template('0-index.html')", "def make_navbar_for_homepage(self):\n links = [\n \"home\", [\"Result Pages\", self._result_page_links()], \"Version\"\n ]\n if len(self.samples) > 1:\n links[1][1] += [\"Comparison\"]\n if self.publication:\n links.insert(2, \"Publication\")\n if self.gwdata is not None:\n links.append([\"Detchar\", [i for i in self.gwdata.keys()]])\n if self.notes is not None:\n links.append(\"Notes\")\n return links", "def print_res(self, result, index=None):\n if index is not None:\n print(str(index).rjust(3)+ \" \" + _c.bold + _c.blue + result[\"title\"] + _c.reset)\n if result[\"description\"]:\n print(\" \"*4 + \"Description:\\t\", result[\"description\"])\n print(\n \" \"*4 +\n result[\"highlight\"].replace(\"<highlight>\", _c.blue).replace(\"</highlight>\", _c.reset),\n )\n print(\" \"*4 + \"Path: \", result[\"path\"])\n else:\n print(\"Title:\\t\\t\", result[\"title\"])\n if result[\"description\"]:\n print(\"Description:\\t\", result[\"description\"])\n print(result[\"highlight\"])\n print(\"Path: \", result[\"path\"])", "def index():\n entertainment_news = get_sources('entertainment')\n fashion_news = get_sources('fashion')\n title = 'Vnews'\n return render_template('index.html', title=title, fashion=fashion_news, entertainment=entertainment_news)", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/preciptation<br/>\"\n f\"/api/v1.0/Stations\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"<a href='/api/v1.0/2017-01-01'>/api/v1.0/2017-01-01</a><br></p>\"\n f\"<a href='/api/v1.0/2017-01-01/2017-01-07'>/api/v1.0/2017-01-01/2017-01-07</a></p>\"\n \n )", "def index(request):\r\n badRequest(\"Url not found\")", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end><br/>\"\n )", "def index():\n\n\treturn(render_template('index.html'))", "def home_page():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end<br/>\"\n )", "def homepage():\r\n print(__name__ + \" invoked\")", "def welcome():\n\n # Assigning links to display the query results\n return (\n f\"<h2>Welcome to My Weather Data API <br> Available Routes:</h2>\"\n f\"<a href='/api/v1.0/precipitation'>Precipitation</a><br>\"\n f\"<a href='/api/v1.0/stations'>Stations</a><br>\"\n f\"<a href='/api/v1.0/tobs'>Tobs</a><br>\"\n f\"<h4>For the two queries below, please input dates between <strong>2010-01-01</strong> and <strong>2017-08-22</strong></h4>\"\n f\"<a href='/api/v1.0/2010-01-01'>01/01/2010</a><br>\"\n f\"<a href='/api/v1.0/2010-01-01/2017-08-22'>01/01/2010 to 08/22/2017</a><br><br>\"\n f\"/api/v1.0/< start ><br>\"\n f\"/api/v1.0/< start >/< end >\"\n )", "def welcome():\n 
return (\n f'Available Routes: <br/>'\n f'<a href=\"/api/v1.0/precipitation\">/api/v1.0/precipitation</a><br/>'\n f'<a href=\"/api/v1.0/stations\">/api/v1.0/stations</a><br/>'\n f'<a href=\"/api/v1.0/tobs\">/api/v1.0/tobs</a><br/><br/>'\n f'<div> Use following link if you have a date range. \\\n Copy paste the link below after the server address. \\\n Enter the start date in yyyy-mm-dd format <br/>\\\n For example : Enter in following way after the server address<br/>\\\n /api/v1.0/2013-01-01 </div><br/>'\n f'/api/v1.0/<br/><br/><br/><br/>' \n f'<div> Use following link if you have a date range. \\\n Copy paste the link below after the server address. \\\n Enter the start and end date in yyyy-mm-dd format <br/>\\\n For example : Enter in following way after the server address<br/>\\\n /api/v1.0/2013-01-01/2013-12-31 </div><br/>'\n f'/api/v1.0/'\n )", "def print(self, index):\n count=0\n start = self.head\n while start:\n if count==index:\n print(count, ' : ', start.getMember())\n break\n start=start.getLink()\n count+=1", "def index(self):\n\t\treturn render_template('index.html')", "def print_navigation(self, current_index):\n # current_index: current index\n \n # the html string we're going to build up\n html = \"\"\n \n html+='<table width=\"100%\"><tr>'\n \n # returns some HTML that are the navigation links at the bottom of the page\n previous_index = self.get_previous_index(current_index)\n \n if previous_index != -1:\n # not empty, so make a link\n html += '<td align=\"center\">'\n previous_link = self.make_link_to(previous_index, 'Previous')\n html += previous_link+'</td>'\n \n next_index = self.get_next_index(current_index)\n \n if next_index != -1:\n html += '<td align=\"center\">'\n next_link = self.make_link_to(next_index, 'Next')\n html += next_link+'</td>'\n\n html += '</table>'\n return html", "def print_allen_url(df):\n display(HTML('<h4>Some url for your convenience</h4>'))\n display(HTML('<p>For Gene summary:</p>'))\n for gene in df['gene'].unique():\n url = df[df['gene'] == gene]['allen_gene_url'].iloc[0]\n text = f'Go to Allen\\'s {gene} summary page.'\n display(HTML(f'<a href=\"{url}\">{text}</a>'))\n display(HTML('<p>For high resolution ISH images viewer:</p>'))\n for _, row in df.iterrows():\n plane, gene, dataset_id, url = row[['plane', 'gene', 'section_data_set_id', 'allen_viewer_url']]\n text = f'Go to Allen\\'s Viewer for {plane} {gene} experiment (id={dataset_id}).'\n display(HTML(f'<a href=\"{url}\">{text}</a>'))\n display(HTML('<p>For 3D expression viewer '\n '(Need to install <a href=\"http://mouse.brain-map.org/static/brainexplorer\">'\n 'Allen Brain Explorer</a>):</p>'))\n for section_data_set_id in df['section_data_set_id'].unique():\n url = df[df['section_data_set_id'] == section_data_set_id]['allen_3d_grid_url'].iloc[0]\n gene = df[df['section_data_set_id'] == section_data_set_id]['gene'].iloc[0]\n text = f'Go to Brain Explorer for {gene} 3D expressions (id={section_data_set_id}).'\n display(HTML(f'<a href=\"{url}\">{text}</a>'))\n return", "def index():\n return render_template(\"index.html\",\n title='Index')", "def index():\n\n INTERFACE.add_dir(u'RÚV', 'view_category', '1')\n INTERFACE.add_dir(u'RÚV Íþróttir', 'view_category', '10')\n INTERFACE.add_dir(u'RÁS 1', 'view_category', '2')\n INTERFACE.add_dir(u'RÁS 2', 'view_category', '3')\n INTERFACE.add_dir(u'Rondó', 'view_category', 'rondo')\n INTERFACE.add_dir(u'Krakkasarpurinn', 'view_category', 'born')\n INTERFACE.add_dir(u'Hlaðvarp', 'view_podcast_index', '')\n INTERFACE.add_dir(u'Leita', 'search', 
'')", "def index_site(site, text):\n # YOUR CODE HERE #\n pass # delete this when you write your code", "def index():\n return (\n f\"Welcome to the Climate App API!<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/&lt;start&gt;<br/>\"\n f\"/api/v1.0/&lt;start&gt;/&lt;end&gt;\"\n )", "def show_apis():\n return (\n f\"<h4>Available Routes:</h4>\"\n f'<a href=\"/api/v1.0/ids\">/api/v1.0/ids</a><br/>' \n f'<a href=\"/api/v1.0/info/1286\">/api/v1.0/info/subject_id</a><br/>' \n f'<a href=\"/api/v1.0/subjects\">/api/v1.0/subjects</a><br/>' \n f'<a href=\"/api/v1.0/subjects/1286\">/api/v1.0/subjects/subject_id</a><br/>' \n f'<a href=\"/\"><h4>Back</h4></a><br/>' \n )", "def index():\n return 'OK'", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/2015-01-01<br/>\"\n f\"/api/v1.0/2015-01-01/2015-12-31\"\n )", "def index():\n\treturn render_template(\"index.html\", title=\"Home\")", "def index():\n return 'Thanks for using the Bird Stats API.'", "def search_main() -> None:\n\n logger.info(\"Starting search\")\n links = run_search(grab_search_info())\n if links:\n logger.info(\"Printing links\")\n for key in links:\n print(f\"{key.upper()}: {links[key]}\")", "def generate_index_page(index_links,\r\n index_fp,\r\n order=[_index_headers['run_summary']]):\r\n # get containing directory for index_fp\r\n top_level_dir = split(split(index_fp)[0])[1]\r\n index_page_header = get_index_page_header()\r\n index_lines = [index_page_header]\r\n d = {}\r\n for e in index_links:\r\n try:\r\n d[e[2]].append((e[0], e[1]))\r\n except KeyError:\r\n d[e[2]] = [(e[0], e[1])]\r\n index_lines.append('<table border=1>\\n')\r\n\r\n # Determine the order the data should be presented in. 
This should be\r\n # the order that the user requested, followed by any categories that\r\n # the user didn't include in the order parameter.\r\n ordered_table_entries = order + [k for k in d if k not in order]\r\n for k in ordered_table_entries:\r\n v = d[k]\r\n index_lines.append(\r\n '<tr colspan=2 align=center bgcolor=#e8e8e8><td colspan=2 align=center>%s</td></tr>\\n' % k)\r\n for description, path in v:\r\n # if path starts with top_level_dir, replace it\r\n # with ./\r\n path = re.sub('^.*%s\\/' % top_level_dir, './', path)\r\n index_lines.append(\r\n '<tr>%s</tr>\\n' %\r\n format_index_link(\r\n description,\r\n path))\r\n index_lines.append('</table>\\n')\r\n\r\n index_page_footer = get_index_page_footer()\r\n index_lines.append(index_page_footer)\r\n\r\n open(index_fp, 'w').write(''.join(index_lines))", "def index():\n title = \"Application process \"\n links = {'mentors': 'mentors',\n 'schools': 'all-school',\n 'mentors_by_country': 'mentors-by-country',\n 'contacts': 'contacts',\n 'applicants': 'applicants',\n 'applicants_and_mentors': 'applicants-and-mentors'}\n menu = ['Show mentors and schools',\n 'Show mentors and all schools',\n 'Show mentors by country',\n 'Show contacts',\n 'Show applicants',\n 'Show applicants and mentors']\n return render_template('index.html', links=links, menu=menu, title=title)", "def index():\n return render_template(\n 'index_t.html',\n call_counter=str(get_model().call_counter),\n app_version=str(app.config.get('GIT_HASH', None))\n )", "def index():\n\n return render_template(\"index.html\"), 200", "def index_url(self):\n return reverse('{}:{}_list'.format(self.label, self.model_map[self.label]))", "def index():\n\n return render_template('index.html', title='Find places on a map!')", "def index_page(data):\n main_index = [html.H1(\"INDEX\"),\n html.Div([html.A(\"Characters\", href=\"/character\"),\n \" | \", html.A(\"Search\", href=\"/search\")]),\n dcc.Loading(html.Img(src=create_wordcloud(data[\"tokens\"]))),\n get_random_quotes(data, 5)]\n\n return main_index", "def print_link(link):\n print('<' + helper(link).rstrip() + '>')", "def _docs():\n url = \"https://vanheeringen-lab.github.io/seq2science\"\n if not webbrowser.open(url):\n print(url)", "def home():\n return(\n f\"Available Routes:<br/>\"\n f\"Precipitation: /api/v1.0/precipitation<br/>\"\n f\"List of Stations: /api/v1.0/stations<br/>\"\n f\"Temperature for one year: /api/v1.0/tobs<br/>\"\n f\"Temperature stat from the start date(yyyy-mm-dd): /api/v1.0/min_max_avg/<start><br/>\"\n f\"Temperature stat from start to end dates(yyyy-mm-dd): /api/v1.0/min_max_avg/<start><br/>\"\n )", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start_date(yyyy-mm-dd)<br/>\"\n f\"/api/v1.0/start_date(yyyy-mm-dd)/end_date(yyyy-mm-dd)<br/>\")", "def index():\n aaa.require(fail_redirect='/login')\n return 'Welcome! 
<a href=\"/admin\">Admin page</a> <a href=\"/logout\">Logout</a>'", "def get_index(self):\n self.client.get('/')", "def index(self):\n return render(\"/derived/rock/index.mako\")", "def index():\r\n return render_template('index.html')", "def show_routes(self):\n routelist= [(handler.regex.pattern, handler.handler_class) for handler in self.handlers[0][1]]\n print(55*\"-\")\n print(\" Routing table (order matters) :\")\n print(55*\"-\")\n for elem in routelist:\n print('{0:<20} {1:<30} '.format(elem[0], str(elem[1])))", "def index():\n return render_template('index.html'), 200", "def main():\n\n # Title\n st.title(\"AB URL Helper\")\n st.subheader(\"Paste URL link below\")\n\n ####################################################################\n ### User Input Fields ###\n ####################################################################\n\n # First Name Field \n user_url = st.text_input(\"Paste URL Link Here:\",\"\")\n\n ####################################################################\n ### Extract URL Article Info ### \n ####################################################################\n\n # Create a submission button to parse URL information \n if st.button(\"Get URL Info\"):\n url = str(user_url)\n article = Article(url)\n article.download()\n article.parse()\n\n url_title = article.title\n\n authors = article.authors\n url_authors = ''.join(map(str,authors))\n\n date = article.publish_date\n\n st.subheader(\"URL Title:\")\n st.write(url_title) \n st.subheader(\"URL Author(s):\")\n st.write(url_authors)\n st.subheader(\"Date Published:\")\n st.write(date.strftime('%m/%d/%Y'))", "def makeLinks(self):\n self.deleteIndexFileIfExists()\n _fileNames = self.getHTMLFileNames()\n _msgPart1 = \"<a href=\\\"\"\n _msgPart2 = \"\\\" target=\\\"loadHTMLResults\\\">\"\n _msgPart3 = \"</a><br>\"\n _link = \"\"\n for _fileName in _fileNames:\n _origFileName = _fileName\n _linkName = _fileName.split('.')[0]\n _createAnchorTag = (_msgPart1+str(_origFileName)+_msgPart2+str(_linkName)+_msgPart3)\n _link = _link + _createAnchorTag\n return _link", "def on_index(self, handler):\n print \"Server sent index page to {0}.\".format(\n handler.client_address[0]\n )", "def print_instructions():\r\n print(\"LED Control URLs - Try them in your web browser:\")\r\n print(\" On : \" + URL + \"/dweet/for/\" + thing_name + \"?state=on\")\r\n print(\" Off : \" + URL + \"/dweet/for/\" + thing_name + \"?state=off\")\r\n print(\" Blink : \" + URL + \"/dweet/for/\" + thing_name + \"?state=blink\\n\")", "def index():\n return render_template('pages/index.html', isNav=True)", "def index():\n return render_template(\"index.html\", content=songs, titles=titles, contentO=songsO, titlesO=titlesO)", "def show_index_page():\n\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\r\n return render_template('index.html')", "def index():\r\n return render_template('index.html')", "def index(self):\n return render_template('main/index.html')", "def getURLs():", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def display():\n\n #still needs some cleanup on imagry and what the site is about. 
\n\n return render_template(\"index.html\")", "def index():\n return \"Hello!\"", "def homepage():\n return (\n f\"Welcome to Hawaii - Climate Page<br/>\"\n f\"<br/>\"\n f\"This site has data from 01-01-2010 to 08-23-2017<br/>\"\n f\"<br/>\"\n f\"Available Pages:<br/>\"\n f\"<br/>\"\n f\"<br/>\"\n f\" Station Information<br/>\"\n f\" /api/v1.0/stations<br/>\"\n f\"<br/>\"\n f\" Percipitation Information<br/>\"\n f\" /api/v1.0/percipitation<br/>\"\n f\"<br/>\"\n f\" Temperature Observations<br/>\"\n f\" /api/v1.0/tobs<br/>\"\n f\"<br/>\"\n f\" Start Date information - complete url is '/api/v1.0//yyyy-mm-dd'<br/>\"\n f\" /api/v1.0/start<br/>\"\n f\"<br/>\"\n f\" Start and End Date information - complete url is '/api/v1.0/yyyy-mm-dd/yyyy-mm-dd'<br/>\"\n f\" /api/v1.0/start/end\"\n )", "def Home():\n return(\n f\"Hawaii Climate Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"and<br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def index():\n return 'Welcome to the resource manager!'", "def checkForIndexPage(r):\n soup = BeautifulSoup(r.text, 'lxml')\n head = soup.find('h1')\n if head != None and head.string != None and (\"Index of \" in head.string):\n return \"Shows 'Index Of' page ✘\" \n else:\n return \"Displays properly ✓\"", "def url():\n ...", "def index(request):\n copy = '2018 ' + author\n\n context = dict(author=author, copyright=copy, repo_url=repo_url)\n\n return render(request, 'index.html', context)", "def get(self):\n self.render(\"index.html\")", "def home():\n return\"\"\"<!DOCTYPE><html><h1>List of all available Honolulu, HI API routes</h1><ul>\n <li>List of precipitation scores from the last year:<a href=\"/api/v1.0/precipitation\">/api/v1.0/precipitation</a></li>\n <li>List of stations:<a href=\"/api/v1.0/stations\">/api/v1.0/stations</a></li>\n <li>List of temp observations from the last year:<a href=\"/api/v1.0/tobs\">/api/v1.0/tobs</a></li>\n <li>List of minimum, maximum, and average temperatures for the date provided (replace &ltstart&gt with a date in 'yyyy-mm-dd' format: <a href=\"/api/v1.0/<start>\">/api/v1.0/<start></a></li>\n <li>List of minimum, maximum, and average temperatures for the dates in range provided (replace &ltstart&gt and &ltend&gt with dates in 'yyyy-mm-dd' format): <a href=\"/api/v1.0/<start>/<end>\">/api/v1.0/<start>/<end></a></li>\n </ul></html>\"\"\"", "def print_help():\n print(\"Archive generated report to a web server. e.g.\")\n print(\"rm -rf /cs-shared/contrail_code_coverage/test_coverage\")\n print(\"cp -a build/coverage/controller/test_coverage \" +\n \"/cs-shared/contrail_code_coverage/\")\n print(\"http://10.84.5.100/cs-shared/contrail_code_coverage/test_coverage\")", "def overview():\n pages_list = g.db.pages.find().sort('name')\n return render_template('{}/index.html'.format(MODULE_DIR), **locals() )", "def index():\n return render_template('index.html', title='Home')", "def populate_index(db):\n\tfor url in URL:\n\t\tprint url\n\t\trequest = urllib2.Request(url)\n\t\ttry :\n\t\t\tresponse = urllib2.urlopen(request)\n\t\texcept urllib2.URLError:\n\t\t\tprint \"Network Unreachable \"\n\t\t\tsys.exit()\t\n\t\ttext = html2text(response.read())\n\t\tdb.generate_index(text,url)", "def admin_index():\n return 'Super-seekrit admin page.'", "def index():\n return render_template('index.html', getName=ASK_NAME)", "def index():\n\n page = \"\"\"\n <h1>Calculator</h1>\n <div>Directions. This app will calculate 2 or more numbers provided in the url string. 
To use:\n <ol>\n <li>Type in http://localhost:8080/</li>\n <li>Type in the arithmetic operation (add, subract, multiply, divide) followed by /</li>\n <li>Type in numbers. Between each number include a /</li>\n <li>For example, http://localhost:8080/add/5/10/</li>\n </ol></div>\n <h2>Tests:</h2><ul>\n <li><a href=\"http://localhost:8080/add/5/10/15\">Addition</a></li>\n <li><a href=\"http://localhost:8080/subtract/100/50/25\">Subraction</a></li>\n <li><a href=\"http://localhost:8080/multiply/5/10/15\">Multiplication</a></li>\n <li><a href=\"http://localhost:8080/divide/100/50\">Division</a></li>\n \"\"\"\n return page", "def getLink(self):", "def index(self) -> HTMLBody:\n\t\treturn render_template(\"index.jinja2\")", "def index(self):\n s = \"\"\n\n sb = []\n for sim in self.simulations.values():\n url = \"{0.uid}/{0.password}/status\".format(sim)\n sb.append(\"<a href='{0}'>{1.uid}</a></br>\".format(\n url, sim))\n s += \"<b>Simulations running:</b></br>\"\n s += \"\\n\".join(sb)\n\n s += \"<b>List of items in shop:</b>\\n</br>\"\n s += \"\\n</br>\".join(self.shop.itemAndCostDict.keys())\n \n s += \"</br><b>List of all items:</b>\\n</br>\"\n s += \"\\n</br>\".join(item.items.keys())\n\n return s", "def customize_index_string(app, url):\n app.index_string = env.get_template(\"dash_layout.html\").render(\n top_menu_items=get_top_menu_items(url)\n )", "def link(self):\n return f\"[{self.numbered_title}]({self.html_url})\"", "def index(self):\n return \"Hello World!\"" ]
[ "0.6662168", "0.63025343", "0.62125635", "0.62048537", "0.6174066", "0.61573774", "0.61266476", "0.60956967", "0.60887426", "0.6074477", "0.60500443", "0.6042261", "0.6034719", "0.6016362", "0.5997981", "0.59627646", "0.59619063", "0.59611434", "0.5957981", "0.59331363", "0.5926642", "0.5926311", "0.59210604", "0.5909969", "0.5898834", "0.58765817", "0.5875511", "0.58690625", "0.58689106", "0.58671975", "0.5848449", "0.584779", "0.5839973", "0.5834947", "0.58343494", "0.5832401", "0.58293205", "0.58198243", "0.58186257", "0.5812097", "0.5806888", "0.58047736", "0.5799421", "0.57696694", "0.57608694", "0.5757579", "0.57563794", "0.575285", "0.5752304", "0.5745809", "0.5738938", "0.573746", "0.57373315", "0.5733721", "0.5730604", "0.5723913", "0.572332", "0.572254", "0.57010174", "0.56936175", "0.5693574", "0.5691953", "0.56902474", "0.56870025", "0.56750387", "0.56727254", "0.5668921", "0.5667529", "0.5664451", "0.56518316", "0.5651349", "0.5650086", "0.56486994", "0.56486994", "0.5644992", "0.56422216", "0.5641653", "0.5641653", "0.5640665", "0.5640473", "0.56334215", "0.5631664", "0.56304884", "0.56301767", "0.5616722", "0.56159186", "0.5611617", "0.56085545", "0.56068665", "0.56047916", "0.55926365", "0.55874014", "0.5579217", "0.5568711", "0.5558244", "0.55561227", "0.55554104", "0.5554241", "0.5553562", "0.55495906", "0.5543245" ]
0.0
-1
this method is called if the item is not a book; the return value cannot be of type str, as later on an iterator would incorrectly iterate through each char instead of the string as a whole
def inp_item_price(self) -> List[str]:
    return [str(input("Enter desired price for item: "))]
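A brief sketch of the behaviour the query describes, under the assumption that a later loop iterates over the returned value: a bare str yields one character per iteration, while the single-element list yields the whole price once (values here are made up).

price_str = "12.50"
price_list = ["12.50"]

print([p for p in price_str])   # ['1', '2', '.', '5', '0'] -- iterates char by char
print([p for p in price_list])  # ['12.50'] -- iterates over the value as a whole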
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_str(item):\n return isinstance(item, str)", "def test_str(self):\n item = self.item\n\n self.assertEqual(str(item), self.item_raw['name'])", "def nonstringiter(obj):\n return not isinstance(obj, string_types) and isinstance(obj, Iterable)", "def _is_good_iterable(obj):\n return _is_iterable(obj) and _has_str_elems(obj)", "def is_string(document):\r\n return isinstance(document, str)", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def _is_iterable_non_string(arg):\n return (hasattr(arg, \"__iter__\") or hasattr(arg, \"__getattr__\")) and not isinstance(arg, str)", "def _is_non_string_iterable(value):\n if isinstance(value, str):\n return False\n if hasattr(value, '__iter__'):\n return True\n if isinstance(value, collections.abc.Sequence):\n return True\n return False", "def Item(self) -> str:", "def Item(self) -> str:", "def Item(self) -> str:", "def is_container(item):\n if isinstance(item, str):\n return False\n elif hasattr(item, \"__iter__\"):\n return True\n\n return False", "def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)", "def __contains__(self, item):\n if not isinstance(item, str):\n raise PyTextCanvasException('string required for left operand')\n return item in str(self)", "def is_iterable_but_not_string(obj):\n return (is_iterable(obj) and not hasattr(obj, 'strip'))", "def is_non_string_iterable(obj: object) -> bool:\n return not (isinstance(obj, str) or isinstance(obj, bytes))\\\n and isinstance(obj, Iterable)", "def is_nonstring_iterable(x):\n if isinstance(x, primitive_iterable):\n return False\n return isinstance(x, collections.Iterable)", "def __contains__(self, item: str) -> bool:\n return item in self.stoi", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_sequence(item):\n return (not hasattr(item, \"strip\") and\n (hasattr(item, \"__getitem__\") or hasattr(item, \"__iter__\")))", "def _is_sequence(obj):\n return hasattr(obj, \"__iter__\") and not isinstance(obj, str)", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def isItem(obType,iType):\n if iType == 'sword':\n return isinstance(obType,Sword)\n elif iType == 'axe':\n return isinstance(obType,Axe)\n elif iType == 'shield':\n return isinstance(obType,Shield)\n elif iType == 'helmet':\n return isinstance(obType,Helmet)\n else:\n pass\n # raise SystemError('Bad item type {} in isItem'.format(iType))", "def test_typeerror_in_case_of_string(self):\n eq_(None,grepit(\"\",\"\",\"\"))", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def test_string_representation(self) -> None:\n item = Item(text=\"some text\")\n self.assertEqual(str(item), \"some text\")", "def check_iterable(value):\n try:\n iter(value)\n if not isinstance(value, six.string_types):\n return True\n else:\n return False\n except Exception as e:\n pass\n\n return False", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def handle_empty_string(item):\n\n convert_to_string = str(item)\n\n return None if 
len(convert_to_string.strip()) == 0 or convert_to_string == 'None' else convert_to_string.strip()", "def is_string(obj):\n return isinstance(obj, str)", "def test_book_have_unicode_method(self):\n expected = u\"{0} - version {1}\".format(self.book.title, self.book.version)\n self.assertEquals(expected, unicode(self.book))", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def is_string(obj):\n return isinstance(obj, basestring)", "def isstringlike(item):\n ret = 1\n try:\n float(item)\n ret = 0\n except ValueError:\n pass\n return ret", "def test_iterate_arlequin_with_raw_return():\n for entry in iterate_arlequin(SNPS_TWO_POPS_TEXT, raw=True):\n assert isinstance(entry, str)", "def _check_item(proc):\n if not isinstance(proc, ReadingProc):\n raise WrongReadingSetItem( 'You try to interract with item(s) of wrong type(s), '\n 'e.g. not describing processes.')", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def test_str(self):\n \n # Create a Resource object\n book = Book(\"Penguin Group\", \"New York\", \"fiction\", 1, \"White Noise\", \n Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\",\n [\"culture\", \"survival\", \"life\", \"society\"])\n \n # Assert expected result of the str function\n self.assertEqual(str(book), (\"ID: 1 \\nTitle: White Noise \"\\\n \"\\nCreator: Don DeLillo \\nSummary: Delillo's White Noise follows \"\\\n \"narrator Jack Gladney, a professor at a \\nsmall Liberal Arts \"\\\n \"college and describes an academic year. Jack teaches \\nat ... 
\"\\\n \"\\nGenre: sci-fi \\nLanguage: English \\nYear: 1985 \"\\\n \"\\nCountry: US \\nLength: 326p \\nType: book \"\\\n \"\\nKeywords: culture, life, society, survival\\nPublisher: \"\\\n \"Penguin Group \\nCity: New York \\nCategory: fiction\"))", "def OnGetItemText(self, item, column):\r\n \r\n return \"\"", "def format_item(self,obj):\n return unicode(obj)", "def is_string(value):\n return isinstance(value, (str, bytes))", "def test_with_1_item(self):\n self.assertEqual(humanize_list(['a']),\n 'a')", "def is_item_iterable(item):\n try:\n _ = [_ for _ in item]\n except TypeError:\n return False\n return True", "def is_string(value):\n return isinstance(value, basestring)", "def test_get_item_by_str(self):\n response = self.client.get('/api/v1/category/\"20\"',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 404)", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def is_iterable(obj: Any, allow_str: bool = False):\n if isinstance(obj, str) and not allow_str:\n return False\n try:\n it = iter(obj) # noqa\n except TypeError:\n return False\n return True", "def iterator(self):\n return _libsbml.string_iterator(self)", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def check_iterable_item_type(first_item,iter_obj):\n\n if (\n operator.length_hint(first_item) > 1 or\n ( operator.length_hint(first_item) == 1 and not isinstance(first_item,(str,bytes)) ) or\n np.ndim(first_item) != 0\n ):\n return None\n\n dtype = np.dtype(first_item.__class__)\n if dtype.name == 'object' or 'str' in dtype.name or ( 'bytes' in dtype.name and len(first_item) > 1):\n return None\n for item in iter_obj:\n if np.ndim(item) != 0:\n return None\n common_dtype = np.result_type(np.dtype(item.__class__),dtype)\n if ( \n common_dtype.name == 'object' or\n 'str' in common_dtype.name or\n ( 'bytes' in common_dtype.name and len(item) > 1 )\n ):\n return None\n if dtype != common_dtype:\n dtype = common_dtype\n return dtype", "def isiterable(obj, strings=False, isinstance=isinstance, Iterable=Iterable):\n return (isinstance(obj, Iterable) and\n not (isinstance(obj, str) and not strings))", "def test_strings(self):\n\n for cls in [IndependentMoney, Beneficiary, CommitteeBenefactor,\n OtherBenefactor, PersonBenefactor, Benefactor,\n PartyBenefactor, Committee]:\n if cls.objects.all().count() == 0: # bad :(\n try:\n obj = cls()\n except:\n continue\n else:\n obj = cls.objects.all()[0]\n\n self.assertNotIn('Object', str(obj), cls.__name__)\n self.assertNotIn('Object', unicode(obj), cls.__name__)\n\n self.assertNotEqual('', str(obj), cls.__name__)\n self.assertNotEqual('', unicode(obj), cls.__name__)", "def item_iter(self, a):\n raise NotImplementedError", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_iterable(obj, isStrIterable=False):\n if not isinstance(obj, Iterable):\n return False\n else:\n # XNOR is an iff statement A XNOR B = A iff B\n return not xor(isStrIterable, isinstance(obj, str))", "def converter(item):\n pass", "def 
test_unicode_loads(self):\r\n assert_not_none(\r\n self.store.get_item(Location('edX', 'test_unicode', '2012_Fall', 'course', '2012_Fall')),\r\n )\r\n # All items with ascii-only filenames should load properly.\r\n assert_not_none(\r\n self.store.get_item(Location('edX', 'test_unicode', '2012_Fall', 'video', 'Welcome')),\r\n )\r\n assert_not_none(\r\n self.store.get_item(Location('edX', 'test_unicode', '2012_Fall', 'video', 'Welcome')),\r\n )\r\n assert_not_none(\r\n self.store.get_item(Location('edX', 'test_unicode', '2012_Fall', 'chapter', 'Overview')),\r\n )", "def is_str(x):\n return isinstance(x, str)", "def test_str(self):\n character = self.character\n\n self.assertEqual(str(character), self.character_raw['name'])", "def func(item):\n\t\textract = lambda regex: re.search(regex, item).group(1) if regex else None\n\t\ttry:\n\t\t\tauthors = extract(authors_regex)\n\t\t\ttitle = extract(title_regex)\n\t\t\tyear = extract(year_regex)\n\t\t\tauthor_list = [author.strip() for author in authors.split(author_sep)]\n\t\t\treturn BibItem(author_list, title, year)\n\t\texcept Exception as e:\n\t\t\tprint(\"WARNING: Could not parse item: \\n\" + item)\n\t\t\tprint(\"Error was: \", e)", "def test_get_book_title(self):\n\t\t\n\t\tself.assertTrue(data.get_book_title(46) == '1 Corinthians')", "def __str__(self):\n return self._itemType", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def IsItalic(self, item):\r\n\r\n return item.IsItalic()", "def __getitem__(self, *args):\n return _libsbml.string___getitem__(self, *args)", "def only_ascii(item):\n checked = [i for i in item if allowed(i)]\n return ''.join(checked).lower()", "def is_text(self):\n return self.value_type in (str, unicode)", "def expected_rubbish(self):", "def validate_item(self, form_item, type_):\n if form_item == \"\":\n return None\n else:\n try:\n return type_(form_item)\n except TypeError:\n return None", "def _process_results(items: List[dict]) -> Iterator[GBook]:\n # todo write a test for this func\n for book in items:\n volume = book['volumeInfo']\n\n authors = volume.get('authors')\n if not authors: # If authors is blank, just move on.\n continue\n\n authors = [util.split_author(a) for a in authors]\n\n isbns = []\n for ident in volume.get('industryIdentifiers', []):\n if ident['type'] == 'ISBN_10':\n try:\n isbns.append(int('978' + ident['identifier']))\n except ValueError: # eg an X in the identifier.\n pass\n elif ident['type'] == 'ISBN_13':\n isbns.append(int(ident['identifier']))\n\n if not isbns:\n continue\n\n price = book['saleInfo'].get('retailPrice')\n if price:\n price = price['amount']\n\n try:\n pub_date = saturn.from_str(volume['publishedDate'], 'YYYY-MM-DD')\n except ParserError: # Might be just a year\n pub_date = saturn.from_str(f\"{volume['publishedDate']}-01-01\", 'YYYY')\n except KeyError:\n pub_date = None\n\n yield GBook(\n title=volume['title'],\n authors=authors,\n isbns=isbns,\n\n internal_id=book['id'],\n\n language=volume.get('language').lower(),\n description=volume.get('description'),\n publication_date=pub_date,\n publisher=volume.get('publisher'),\n categories=volume.get('categories', []),\n\n book_url=volume.get('infoLink'),\n epub_url=book['accessInfo']['epub'].get('downloadLink'),\n pdf_url=book['accessInfo']['pdf'].get('downloadLink'),\n 
purchase_url=book['saleInfo'].get('buyLink'),\n price=price,\n )", "def isAlphabet(self, seqstr):\n mystr = seqstr\n if type(seqstr) is Sequence:\n mystr = seqstr.getString()\n return self.getAlphabet().isValidString(mystr)", "def mapper(item: Union[str, object]) -> str:\n return str(item)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def __str__(self):\n return str(self.item)", "def is_iterable(x):\n if isinstance(x, six.string_types):\n return False\n return hasattr(x, '__iter__')", "def _getCommented(self, items): # TODO: move this method to more suitable place when possible (scripting base class)\r\n\r\n if isinstance(items, type(())): # if attribute is tuple, it is coordinates. cast to string\r\n return str(items)\r\n\r\n # if attribute is int, cast to string\r\n if type(items) == types.IntType:\r\n items = str(items)\r\n\r\n if isinstance(items,type([])):\r\n isList = True\r\n items = items[:] # do not modify original list of items, otherwise it cannot be used in verify multiple times\r\n elif items.find('//')>-1:\r\n items = items.split('//')\r\n isList = True\r\n else:\r\n isList = False\r\n items = [items]\r\n\r\n for i in range(len(items)):\r\n # empty string check\r\n if not len(items[i]):\r\n continue\r\n\r\n negate = False\r\n\r\n if items[i][0] == '!' and len(items[i]) > 1: # check if first char is ! and it's not the only char\r\n items[i] = items[i][1:]\r\n negate = True\r\n\r\n if items[i][0] == 'n': # if string could be a logical text id\r\n translation = self.phone.getTranslation(items[i])\r\n if translation != None:\r\n items[i] = '%s (%s)' % (translation,items[i])\r\n\r\n if \"/\" in items[i]: # if string could be an image\r\n imagefile = self._getImage(items[i])\r\n if items[i] != imagefile:\r\n items[i] = '%s (%s)' % (imagefile, items[i])\r\n\r\n if negate == True:\r\n items[i] = '!' 
+ items[i]\r\n\r\n if isList:\r\n return items\r\n\r\n return items[0]", "def test_books_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data,Books))", "def _get_bad_return_tags():\n # case not a str or a list:\n is_int = [12]\n # case is a list, but not all elements are str:\n is_not_all_str = [[\"this\", \"is\", \"a\", \"test\", 12, \"!\"]]\n\n return is_int + is_not_all_str", "def test_check_only_one_fontName(self):\n fonts = []\n result = False\n for css_class in self.pisa_doc.css[0].values():\n for font in css_class.values():\n fonts.append(font)\n for font in fonts:\n if not isinstance(font, list):\n result = True\n else:\n result = False\n break\n #here we are checking if all objects in fonts list are str, the result have to be True\n self.assertTrue(result)", "def testFormatISBN(self): \n val = format_isbn(\"1234567894123\")\n self.assertEqual(val,\"123-4-567-89412-3\")", "def test_return_type(self):\n self.assertEqual(type(self.s0.from_json_string(self.string)), list)", "def __iter__(self):\r\n return self.strings.iteritems()", "def is_isbn(val):\n if is_isbn10(val) or is_isbn13(val):\n if val[0:3] in [\"978\", \"979\"] or not is_ean13(val):\n return True\n return False", "def stringable(self):\n return True", "def _iter_strings(self, indent=1, show_counts=True):\n ...", "def process_item(self, item, spider):\n\n # Memory has extra postfix letters and they need to be remove\n # and then converted into actual integer\n numeric = RE_MATCH.match(item['Memory']).group(0)\n item['Memory'] = int(numeric)\n\n # The same case as above but here the value is a float\n numeric = RE_MATCH.match(item['Base Frequency']).group(0)\n item['Base Frequency'] = float(numeric)\n\n \"\"\"\n Some folks identify MB as number making it 'int' in the spider causing\n Pandas to get crazy so the value is explicity marked as 'str'.\n\n Also sometimes motherboard information is missing but as it is not\n necessarily making the data obsolete, the result is still stored.\n \"\"\"\n item['Motherboard'] = str(item['Motherboard']) if 'Motherboard' in item else ''\n\n # In order to keep potential string processing simples, they are all\n # converted into lowercase strings.\n for key in item:\n if type(item[key]) is str:\n item[key] = item[key].lower()\n\n return item", "def add_quote(item):\n if type(item) == str:\n return \"\\'\" + item + \"\\'\"\n else:\n return item", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def _type(self, item):\n return self.cv.type(item)", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def not_iterable(obj):\n return hasattr(obj,\"rstrip\") or not (hasattr(obj,\"__getitem__\") or hasattr(obj,\"__iter__\"))", "def test_get_items_from_string() -> None:\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i, ,p\")\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i- -p\", separator=\"-\")\n assert [\"i\", \" \", \" p\"] == common_util.get_items_from_string(\"i, , p\", remove_blanks=False)\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i, , p\")\n assert [] == 
common_util.get_items_from_string(\"\")", "def ISTEXT(value):\n return isinstance(value, (basestring, AltText))", "def _maybe_show_implicit_non_ascii_error(self, node):\n if six.PY3:\n return\n if not isinstance(node.s, bytes):\n return\n if not any(ord(c) > 127 for c in node.s):\n return\n if any(\n self.filename.endswith(suffix)\n for suffix in self.config.IGNORED_FILES_FOR_EXPLICIT_STRING_LITERALS\n ):\n return\n # for multiline strings, the lineno is the last line and the col_offset is -1\n # there appears to be no simple way to get to the beginning of the string, and therefore no\n # way to determine whether there is a b prefix, so just ignore these strings\n if node.col_offset == -1:\n return\n line = self._lines()[node.lineno - 1]\n char = line[node.col_offset]\n if char in (\"b\", \"u\"):\n return\n self._show_error_if_checking(\n node,\n \"string containing non-ASCII characters should be explicitly marked as bytes or \"\n \"unicode\",\n error_code=ErrorCode.implicit_non_ascii_string,\n )", "def is_str_or_coll(value):\n return bool(is_str(value)) or bool(is_tuple_or_list(value))", "def checkio(element):\n return [\"\", u\"\", \"\"]" ]
[ "0.67216665", "0.59875196", "0.5834853", "0.56345963", "0.5633404", "0.5623628", "0.5504701", "0.550421", "0.5502145", "0.5502145", "0.5502145", "0.5437296", "0.54353356", "0.54093117", "0.5380066", "0.5354463", "0.5319337", "0.52930754", "0.5278437", "0.52694666", "0.52569604", "0.5247714", "0.52353233", "0.52265364", "0.522533", "0.5180457", "0.51715535", "0.515377", "0.5139933", "0.5129124", "0.5126358", "0.5119757", "0.5094057", "0.5079186", "0.5044467", "0.504299", "0.5042077", "0.5035186", "0.5032342", "0.50157297", "0.50107056", "0.5007886", "0.50026935", "0.49967226", "0.49754384", "0.49732476", "0.49666777", "0.496591", "0.4963864", "0.49626872", "0.49483454", "0.49482504", "0.4933864", "0.4921206", "0.49122918", "0.4904862", "0.4887502", "0.48843753", "0.48814267", "0.4862335", "0.48495752", "0.48414862", "0.48310217", "0.4829632", "0.48238978", "0.48171794", "0.48126328", "0.48114955", "0.48113263", "0.48083207", "0.48056257", "0.4803724", "0.47758612", "0.4772381", "0.47695357", "0.4765524", "0.4763714", "0.47632492", "0.47595537", "0.47560683", "0.47491142", "0.4741219", "0.47174755", "0.47144282", "0.4708895", "0.47069156", "0.46964997", "0.4696036", "0.4695249", "0.467612", "0.46718174", "0.4654934", "0.4650999", "0.46504554", "0.46402243", "0.46394408", "0.4638097", "0.46364495", "0.46353874", "0.46343336", "0.46291688" ]
0.0
-1
appends entry to text document
def add_registry(self) -> None: # inits functions corresponding to user input and takes in url input item_options = {'n': self.inp_item_price, 'y': self.inp_book_prices} url = str(input("Enter URL to amazon item: ")) # validates url input - prevents inputting duplicate and/or blank URLs if(url == "" or url in self.load_links()[1]): print("Item not added - URL already exists or is blank") return # user-input price(s) -> then -> validates price input prices = item_options.get(self.input_item_category())() try: for price in prices: float(price) except ValueError: print("Do not include any letters or symbols other than '.' - Item not added!") return # writes input as a line of text to text file with open(URL_FILE, 'a') as text_file: text_file.write(self.format_string(url, prices)) pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, entry):\n \"An entry is a tuple of (id, datatime, text).\"\n id = entry[0]\n datee = entry[1]\n text = re.sub('[^A-Za-z0-9]+', ' ', entry[2].lower())\n self.recordsDict[id].create(id, datee, entry[2])\n for word in text.split():\n self.wordDict[word].add(id)", "def save_txt():\n # open file and append, if it doesn't exist then create it.\n with open('journal_entries.txt', 'a+') as f:\n # .get the input in text widget at the first line, '0th' character, then read until the end\n f.write(\"\\n\" + get_date_time())\n for i in range(len(entries)):\n string = entries[i].get('1.0', 'end-1c')\n if string:\n f.write(\"\\n\" + string)", "def addContent(text):", "def append(self, doc):\n pass", "def append(self, entry):\n self.strings.append(entry)", "def write(self, text):\n\n # Append without new line:\n\t\tself.wg.append(text)", "def write(self, txt):\n self.contents += txt", "def append(self, text):\n self.text += text", "def append_write(filename=\"\", text=\"\"):\n with open(filename, \"a\") as file:\n return file.write(text)", "def add_new_entry(self):\n clear_screen()\n new_entry = Entry.create()\n if new_entry is None:\n print(\"Add new entry cancelled. Returning to main menu...\")\n time.sleep(1)\n return None\n self.entries.append(new_entry)\n with open(self.file_name, \"a\") as file:\n writer = csv.writer(file)\n writer.writerow([new_entry.date, new_entry.name, new_entry.minutes, new_entry.note])", "def add_document_text(original_text, new_text_to_add):\n return original_text + r'+\"{0}\"'.format(new_text_to_add)", "def append_write(filename=\"\", text=\"\"):\n with open(filename, 'a') as f:\n return f.write(text)", "def append_write(filename=\"\", text=\"\"):\n with open(filename, 'a', encoding='utf-8') as file:\n return file.write(text)", "def append_write(filename=\"\", text=\"\"):\n with open(filename, \"a\", encoding=\"utf=8\") as TxtFile:\n return TxtFile.write(text)", "def append_write(filename=\"\", text=\"\"):\n with open(filename, 'a', encoding='utf-8') as f:\n return f.write(text)", "def append_write(filename=\"\", text=\"\"):\n with open(filename, mode=\"a\") as file:\n return file.write(text)", "def append_write(filename=\"\", text=\"\"):\n with open(filename, \"a\", encoding=\"utf-8\") as file1:\n return file1.write(text)", "def append_write(filename=\"\", text=\"\"):\n with open(filename, 'a', encoding='utf=8') as f:\n return f.write(text)", "def append_write(filename=\"\", text=\"\"):\n with open(filename, 'a', encoding=\"UTF8\") as f:\n return f.write(str(text))", "def write(self, txt): \n self.log.appendtext(txt)\n self.log.update_idletasks()\n return", "def append(self, text):\n cursor = QTextCursor(self._doc)\n cursor.movePosition(QTextCursor.End)\n cursor.insertBlock()\n cursor.insertText(text)", "def append_write(filename=\"\", text=\"\"):\n with open(filename, \"a\", encoding=\"utf-8\") as content:\n return content.write(text)", "def append_write(filename=\"\", text=\"\"):\n with open(filename, \"a\", encoding='utf-8') as the_file:\n return(the_file.write(text))", "def append(self, document):\n raise NotImplemented(\"Corpus does not allow appending\")", "def write_text(self, text):\n self.ui.plainTextEdit.appendPlainText(text)\n logging.info(text)", "def append_write(filename=\"\", text=\"\"):\n with open(filename, mode=\"a+\") as f:\n return f.write(text)", "def perform_insert(self, entry, new_text, new_pos):\n entry.handler_block_by_func(self.cb_f1_entry_1_insert_float)\n entry.set_text(new_text)\n 
entry.handler_unblock_by_func(self.cb_f1_entry_1_insert_float)\n\n GObject.idle_add(entry.set_position, new_pos)\n\n entry.stop_emission(\"insert_text\")\n return", "def add_text(self,\r\n index,\r\n addtext):\r\n\r\n oldkeyset = self.get_keys_from_note(index)\r\n oldtext = self.get_text_from_note(index)\r\n oldmeta = dict(self.get_metadata_from_note(index))\r\n oldmeta['date'].append(str(datetime.datetime.now()))\r\n\r\n self.delete(index)\r\n self.addnew(oldkeyset,\r\n oldtext+addtext,\r\n metadata=oldmeta)", "def append(self, doc):\n if doc is None:\n return\n assert isinstance(doc, Document)\n self.fh.write(doc.save_mem(fmt=\"json\"))\n self.fh.write(\"\\n\")\n self.n += 1", "def save_text(self):\n content = self.get_content()\n if content != '':\n self.text.append((content, self.context, self.ancestor))", "def append_write(filename=\"\", text=\"\"):\n with open(filename, 'a', encoding='utf-8') as MyFile:\n return(MyFile.write(text))", "def append_write(filename=\"\", text=\"\"):\n with open(filename, 'a') as f:\n a = f.write(str(text))\n return a", "def add_entry(self, entry: str) -> None:\n self.entries.append(f\"{self.count}: {entry}\")\n self.count += 1", "def append(self, text):\n\n # Split 'text' into lines.\n lines = [ l.strip() for l in text.strip().split(\"\\n\") ]\n for line in lines:\n self.lines.append(line.strip())", "def append_write(filename=\"\", text=\"\"):\n with open(filename, 'a') as new:\n appended = new.write(text)\n new.close()\n return appended", "def write(self, new_text: str):\n self.text += new_text", "def append_write(filename=\"\", text=\"\"):\n with open(filename, 'a', encoding='utf-8') as fd:\n x = fd.write(text)\n print(x)", "def insert_text(self, text):\n self.str += text", "def add_text(self, text: str) -> None:\n self.texts.append(text.strip().rstrip(\"\\n\"))", "def add_user_text(self):\n text_to_add = self.user_string_entry.get()\n self.user_string_entry.delete(0, tk.END)\n self.markov_chain.add_string(text_to_add)", "def write_to_file(entry, file):\n with open(file, \"a\") as f:\n f.write(entry)", "def AppendText(self, text):\n self.__context.builder.DocumentAppend(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n text)\n self._blip_data.content += text", "def appendText( self, text, lineNumber=0 ):\n try:\n # Works for TextCommand, otherwise breaks\n self.commands[-1].text += text\n except IndexError as e:\n # First command? Then the list will have been empty.\n self.commands.append( self.makeContent(text,lineNumber) )\n except AttributeError as e:\n # Not a TextCommand? Then there won't be a text attribute.\n self.commands.append( self.makeContent(text,lineNumber) )", "def WriteText( self, text ) :\n # Always adjust the insertion point BEFORE the insertion.\n self.folderTxtCtl.SetInsertionPointEnd()\n self.folderTxtCtl.WriteText( text )", "def new_entry():\n clear_screen()\n entry = {}\n entry['id'] = get_next_id()\n entry['name'] = input_name()\n print(\"How many minutes did you spend on {}?\".format(entry['name']))\n print(\"Or you may specify a format after the time, seperated by a comma\")\n entry['time_spent'] = input_time_spent()\n add_notes = input(\"Add notes? 
Y/n \").lower()\n if add_notes != 'n':\n entry['notes'] = input_notes()\n entry['date'] = datetime.now().strftime(FMT_MONTH_DAY_YEAR)\n with open(WORK_LOG_FILENAME, 'a', newline='') as work_log:\n work_log_writer = csv.DictWriter(work_log, fieldnames=FIELDNAMES)\n work_log_writer.writerow(entry)", "def append(self, doc):\n if doc is None:\n return\n assert isinstance(doc, Document)\n data = {}\n if self.data_fields:\n if isinstance(self.data_fields, list):\n for fname in self.data_fields:\n data[fname] = doc.features[self.data_feature][fname]\n else:\n data.update(doc.features[self.data_feature])\n # assign the document field last so it overwrites anything that comes from the data feature!\n if self.document_bdocjs:\n data[self.document_field] = doc.save_mem(fmt=\"json\")\n else:\n data[self.document_field] = doc.text\n self.fh.write(json.dumps(data))\n self.fh.write(\"\\n\")\n self.n += 1", "def add_text(self, text):\n self.text = self.text + text", "def add_document(self, doc_info):\n docname = doc_info[u'name']\n docid = doc_info[u'url']\n terms = doc_info[u'terms']\n text = doc_info[u'text']\n reader_name = doc_info[u'reader']\n doc = self.get_document(docid)\n if doc is not None:\n if doc.text == text and doc.reader == reader_name:\n # nothing has changed, so return\n return\n self._clear_document(docid)\n \n term_counts = defaultdict(int)\n for term in terms:\n if isinstance(term, tuple):\n # this is a (term, value) tuple\n term, value = term\n else:\n value = 1\n term_counts[term] += value\n term_items = term_counts.items()\n total = 0\n for term, value in term_counts.items():\n self._increment_term_count(term, abs(value), True)\n total += abs(value)\n self._increment_term_count(ANY, total, True)\n\n for key, value in doc_info.get('tags', []):\n self.set_tag_on_document(docid, key, value)\n \n doc = Document(docid, docname, reader_name, text, term_items)\n self.sql_session.add(doc)\n #self.commit()", "def data(self, text):\n if self._keep_text:\n self._text.append(text)", "def add_command():\n backend.insert(title_text.get(),\n author_text.get(),\n year_text.get(), \n isbn_text.get())\n \n # listing.delete(0, END)\n listing.insert(END, \n (title_text.get(),\n author_text.get(), \n year_text.get(), \n isbn_text.get()))", "def sendAppendEntry(self, center_id):\n prevEntry = self.log[self.nextIndices[center_id]-1]\n self.server.appendEntry(center_id, self.current_term,\n prevEntry.index, prevEntry.term,\n self.log[self.nextIndices[center_id]:],\n self.commit_idx)", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def append_sequence_of_text(self, sequence):\n file_name = self.file_name\n with open(file_name, 'a+') as file:\n file.write('\\n\\n')\n for line in sequence:\n file.write('\\t{}\\n'.format(line))", "def append_string_to_textfile(filename, string):\n filepath = root + filename\n with open(filepath, 'a+') as file:\n file.write(string + \"\\n\")", "def addEntry(self, entry):\n \n with open(self.current_log, 'ab') as a:\n logAppender = csv.writer(a, delimiter=\"|\")\n logAppender.writerow(entry)", "def put_attach_document(filename: str, entry_hash: str) -> str:\n g.ledger.file.insert_metadata(entry_hash, \"document\", filename)\n return f\"Attached '{filename}' to entry.\"", "def db_add_entry(person):\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n if person.name in db:\n print(\"Updating existing entry ..... 
{name}\\n\".format(name=person.name))\n else:\n person.new = True\n print(\"Adding new entry ..... {name}\".format(name=person.name))\n db[person.name.capitalize()] = person.phone\n db.sync()\n db.close()\n db_show_all()", "def save_text(self, text: Text):\n self.text_versions.append(deepcopy(text))", "def _log_append(self, msg):\n\t\tp = self._edit.get_buffer()\n\t\tstart,end = p.get_bounds()\n\t\tp.insert(end, msg)\n\t\tself._trunc_lines()\n\t\tself._edit.scroll_to_iter(p.get_end_iter(), 0.0)", "def addText(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def append_to_log(file_name: str, text: str):\n if not log_file_exists(file_name):\n create_log_file(file_name)\n log = open(get_complete_file_name(file_name), 'a')\n log.write(text)\n log.write(\"\\n\")\n log.close()", "def add_text(self, text):\n text_template = self.templateEnv.get_template(f'{ReportGenerator.COMPONENTS_FOLDER}/text.html')\n text_output = text_template.render(text=text)\n self.contents.append(text_output)", "def displayText(self):\n if self.entryWidget.get().strip() == \"\":\n tkMessageBox.showerror(\"Tkinter Entry Widget\", \"Enter a text value\")\n else:\n self.file_com.write(self.entryWidget.get().strip()+'\\n')", "def add(self, document):\n #words=[word.lower() for word in words if word.isalpha()] #added on 0415\n for token in [t.lower() for t in nltk.word_tokenize(document)]:\n if not token.isalpha():\n continue\n\n if token in self.stopwords:\n continue\n \n if self.stemmer:\n token = self.stemmer.stem(token)\n \n if self.__unique_id not in self.index[token]:\n self.index[token].append(self.__unique_id)\n \n self.documents[self.__unique_id] = document\n self.__unique_id += 1", "def append_write(filename=\"\", text=\"\"):\n with open(filename, 'a', encoding=\"utf-8\") as f:\n count = f.write(text)\n return count", "def insert_addition(self, text, user):\r\n added_at = date.today()\r\n addition = TopicAddition(date=added_at, text=text, user=user)\r\n \r\n session = self.persistence.get_session() \r\n session.add(addition)\r\n session.commit()", "def append_new_line(file_name, text_to_append):\n # Open the file in append & read mode ('a+')\n with open(file_name, \"a+\") as file_object:\n # Move read cursor to the start of file.\n file_object.seek(0)\n # If file is not empty then append '\\n'\n data = file_object.read(100)\n if len(data) > 0:\n file_object.write(\"\\n\")\n # Append text at the end of file\n file_object.write(text_to_append)", "def append_text(file_path, text):\n # Check if file ends with txt\n if not file_path.endswith('.txt'):\n raise IllegalArgumentError(f\"{file_path} needs to have a .txt extension\")\n\n # Write file\n with open(file_path, 'a') as file:\n if isinstance(text, str):\n file.write(text)\n elif isinstance(text, list):\n file.writelines(text)\n else:\n raise IllegalArgumentError(\"text variable is not a string or list of strings\")\n\n return True", "def add_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n je.Editor(self.session, self.source.tbl, self.source)", "def write_text(self, text):\n Application.write_text(self, text, self.TXT_FIELD)", "def write(self, text):\n self.text = text", "def append_write(filename=\"\", text=\"\"):\n\n with open(filename, \"a\") as my_file:\n nb_char = my_file.write(str(text))\n my_file.close()\n return (nb_char)", "def _SaveEntries(self, entries):\n text = \"entries = \\\\\\n\" + pprint.pformat(entries, 2) + '\\n'\n file_path = os.path.join(self._root_dir, 
self._options.entries_filename)\n gclient_utils.FileWrite(file_path, text)", "def create_entry():\n new_entry = DB_Entry() # Create instance of entry to add the info to\n print('Eratosthenes is ready to add your new entry.\\n')\n new_entry.set_id()\n title = input('Enter the title:\\n')\n new_entry.set_title(title)\n authors = input('Enter the authors as list of surname, firstname separated by semicolons:\\n')\n new_entry.set_authors(authors)\n try:\n year = int(input('Enter the year:\\n'))\n except ValueError:\n try:\n year = int(input('Enter the year as an integer:\\n'))\n except ValueError:\n print('You failed to follow basic instructions. The year is set to 2000\\n')\n year = 2000\n new_entry.set_year(year)\n pub_type = input('Enter the publication type as article/review/book/other:\\n')\n try:\n new_entry.set_type(pub_type)\n except ValueError:\n try:\n pub_type = input('Type must be one of article/review/book/other:\\n')\n new_entry.set_type(pub_type)\n except ValueError:\n print('You failed to follow basic instructions. Type is now set to \\'other\\'\\n')\n pub_type = 'other'\n new_entry.set_type(pub_type)\n keywords = input('Enter list of keywords separated by semicolons:\\n')\n new_entry.set_keywords(keywords.split(';'))\n current_path = input('Enter the current path to the file\\n')\n current_path = current_path.replace('~', '/Users/marcus')\n if not os.path.isfile(current_path):\n print('File not found. Please try again')\n current_path = input('Enter the current path to the file\\n')\n if not os.path.isfile(current_path):\n print('File not found')\n new_entry.set_new_path()\n db_actions.copy_file(new_entry.get_path(), current_path)\n return new_entry", "def addWordEntry(self, data, course):\n\n\t\tdef parseWordData(Data):\n\t\t\tdivisor = \"@@\"\n\t\t\tresult = Data.split(divisor)\n\n\t\t\tif len(result) < 2:\n\t\t\t\treturn None, None\n\t\t\telse:\n\t\t\t\tresult = [i.strip(\"\\t\\n\\r \") for i in result]\n\t\t\t\tif not result[0] or not result[1]:\n\t\t\t\t\treturn None, None\n\n\t\t\t\treturn result[0], result[1]\n\n\t\tword, translation = parseWordData(data)\n\n\t\tif not word:\n\t\t\treturn False\n\t\telse:\n\t\t\tcommand = \"INSERT INTO words (word, translation, level, course, last_refresh)\" \\\n\t\t\t\t\t\" VALUES (?, ?,0, ?, 0);\"\n\t\t\tparams = (word, translation, course,)\n\n\t\t\tself._run_command(command, params)\n\t\t\treturn True", "def append_write(filename=\"\", text=\"\"):\n count = 0\n with open(filename, 'a', encoding='utf-8') as f:\n count = f.write(str(text))\n return count", "def append_entry(host, email, password, mailbox):\n\n new_entry = {\n\n 'host': host,\n 'email': email,\n 'password': password,\n 'mailbox': mailbox\n }\n\n with open('data.json') as f:\n data = load(f)\n\n data[\"items\"].append(new_entry)\n\n with open('data.json', 'w') as outfile:\n dump(data, outfile, indent=4)\n\n print('\\nNew Entry Added Successfully!')", "def append_text(text_area, outstring):\n text_area.insert(Tk.END, outstring)\n text_area.see(Tk.END)", "def new_entry(title, content):\n\n title.strip # Remove the spaces from both sides.\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n return False\n default_storage.save(filename, ContentFile(content))\n return True", "def write(self, txt):\n if self._dirty:\n self.update(\"\")\n self._dirty = False\n self._stream.write(txt)", "def add(self, line):\n self.body.append(line)", "def append_log_message(self, text):\n self._new_logs.append(text)", "def append(self, doc):\n if doc is None:\n 
return\n assert isinstance(doc, Document)\n path = self.file_path_maker(doc=doc, idx=self.idx)\n path = os.path.normpath(\n path\n ) # convert forward slashes to backslashes on windows\n path = os.path.join(self.dirpath, path) + self.ext\n # check if we need to create the directories. For this we first need to get the directories part of the path,\n # which is everything left of the last slash\n if os.path.sep in path:\n dirs = path[: path.rindex(os.path.sep)]\n if not os.path.exists(os.path.normpath(dirs)):\n os.makedirs(dirs)\n Document.save(doc, path, fmt=self.fmt)\n self.idx += 1", "def append(self, entry):\n if not isinstance(entry, Entry):\n raise ValueError('Not an tlv8.Entry: {e}'.format(e=entry))\n self.data.append(entry)", "def update(self,\r\n keyset,\r\n text,\r\n meta=None,\r\n as_child=False,\r\n right_at=False):\r\n if meta is None:\r\n meta = {}\r\n as_next = right_at and not as_child\r\n\r\n returntext = add_form(keyset.union(set(self.defaults.get('defaultkeys'))),\r\n text,\r\n meta,\r\n as_child=as_child,\r\n as_next=as_next)\r\n directoryname = os.getcwd()+'/textfiles'\r\n textfile = open(directoryname+SLASH+prefix+'backup'+'.txt', 'ab')\r\n textfile.write(codecs.encode(returntext.replace\r\n ('\\ufeff', EMPTYCHAR).replace(EOL, '\\r\\n')\r\n +'\\r\\n',\r\n encoding='utf-8',\r\n errors='ignore'))\r\n #Codecs encoding to prevent unicode error\r\n textfile.close()", "def add_item(self, text):\n\t\tnew_todo = self.todolist.add(text)\n\t\tself.store.append((new_todo.id, text))", "def add_entry_to_bibtex_db(self, ent):\n\n # add additional fields manually to the dict\n ent.consolidate_dict()\n self.bibtex_db.entries.append(ent.raw_dict)\n # the following updates the entries dict\n # self.bibtex_db.get_entry_dict()\n # # make sure it's there\n # if ent.ID not in self.bibtex_db.entries_dict:\n # self.bibtex_db.entries_dict[ent.ID] = ent.raw_dict", "def append(self, search):\n query_values = {\n \"id\": str(search.id),\n \"term\": search.term,\n \"timestamp\": search.timestamp\n }\n\n self._cursor.execute(f\"\"\"\n INSERT INTO {self._table_name}\n VALUES (:id, :term, :timestamp);\"\"\", query_values)\n\n self._conn.commit()", "def add_txt_record(self, record_name, record_content, record_ttl):\n\n domain = dns.resolver.zone_for_name(record_name)\n\n n = dns.name.from_text(record_name)\n rel = n.relativize(domain)\n\n response = requests.put('{server}/update/{zone_name}/add?name={name}&rtype={rtype}&value={value}&ttl={ttl}'\n .format(server=self.server, zone_name=domain, name=rel, rtype='txt',\n value=record_content, ttl=record_ttl),\n auth=self.auth)\n\n if not response.ok:\n raise errors.PluginError('Encountered error adding TXT record: {0}'\n .format(response.text))", "def add_blog(self, text):\n self.blog.add_blog(text)\n self.refresh()", "def addMatchEntry(tmpFD, match, mtype, val):\n tmpFD.write(f\" {match} {mtype} {val}\\n\")", "def _add_entry(self, entry_id: int, text: str, category=None, new_field_dict=None):\n if category is None:\n category = self.active_category\n if category is None:\n raise ValueError(\"Cannot add entry without specifying category if 'active_category' is None.\")\n if entry_id < 0:\n self.CustomDialog(\"Entry ID Error\", message=f\"Entry ID cannot be negative.\")\n return False\n if entry_id in self.get_category_data():\n self.CustomDialog(\n title=\"Entry ID Error\",\n message=f\"Entry ID {entry_id} already exists in category {camel_case_to_spaces(self.active_category)}.\",\n )\n return False\n\n self._cancel_entry_id_edit()\n 
self._cancel_entry_text_edit()\n self.get_category_data()[entry_id] = new_field_dict # add entry to category dictionary\n self._set_entry_text(entry_id, text)\n self.select_entry_id(entry_id, set_focus_to_text=True, edit_if_already_selected=False)\n\n # TODO\n # if from_history:\n # self.jump_to_category_and_entry(category, text_id)\n # if not from_history:\n # self.action_history.record_action(\n # undo=partial(self._delete_entry, category, text_id),\n # redo=partial(self._add_entry, category, text_id, text),\n # )\n # self.unsaved_changes.add((self.active_category, text_id, 'add'))\n\n return True", "def _append_speech(self, speech):\n with open(self.output_directory + \"speech_collection.txt\", \"a+\", encoding=\"utf-8\") as sp_coll:\n sp_coll.write(speech + \"\\n\")", "def add_relative_entry(self, entry_id, offset=1, text=None):\n if text is None:\n text = self.get_entry_text(entry_id) # Copies name of origin entry by default (can be overridden).\n new_field_dict = self.get_field_dict(entry_id).copy()\n self._add_entry(entry_id=entry_id + offset, text=text, new_field_dict=new_field_dict)", "def append(self, tag):\r\n self.insert(len(self.contents), tag)", "def insert_text_in_file(file_path: pathlib.Path, tag: str, text: str) -> bool:\n lines: List[str] = []\n with file_path.open('r') as f:\n lines = f.readlines()\n for ii, line in enumerate(lines):\n if line.find(tag) >= 0:\n lines.insert(ii + 1, text)\n with file_path.open('w') as f:\n f.writelines(lines)\n return True\n return False", "def append_write(filename=\"\", text=\"\"):\n\n with open(filename, 'a', encoding='utf-8') as file:\n number_lines = file.write(text)\n return number_lines", "def append(self, line):\n self.ag.append(line)", "def test_add_1(self):\n contents = testdata.get_words()\n d = testdata.create_dir()\n ts = {\n \"foo.txt\": [contents],\n }\n ds = d.add(ts)\n path = ds[0]\n self.assertTrue(os.path.isfile(path), \"{} does not exist\".format(path))\n self.assertEqual(contents, path.read_text())", "def _initNewEntryDocument(self, atomDoc): #@UnusedVariable #$NON-NLS-1$\r\n pass", "def writeln(self, content):\n ..." ]
[ "0.7126714", "0.6999089", "0.685365", "0.67971444", "0.6738247", "0.64986163", "0.64700305", "0.64590263", "0.645142", "0.6435323", "0.64345604", "0.6418391", "0.6415381", "0.639371", "0.6388531", "0.63824403", "0.63644826", "0.63609344", "0.63544655", "0.6342864", "0.6342211", "0.63394153", "0.6336318", "0.6322128", "0.6320305", "0.631786", "0.6282077", "0.6277477", "0.6271983", "0.62696975", "0.6265307", "0.62508917", "0.6226449", "0.62207973", "0.6213248", "0.61971074", "0.6191933", "0.619124", "0.6119132", "0.6114591", "0.60917217", "0.6049712", "0.60494995", "0.6046622", "0.60240406", "0.59932244", "0.595923", "0.59308237", "0.59191924", "0.59189177", "0.5915055", "0.5908925", "0.59039456", "0.5897721", "0.5885935", "0.58667064", "0.5829109", "0.58240163", "0.5823003", "0.580087", "0.57941926", "0.57921636", "0.57914144", "0.5778315", "0.5776788", "0.5772656", "0.57725674", "0.57685405", "0.57679296", "0.5740971", "0.57393306", "0.5725867", "0.57242966", "0.5718858", "0.57123023", "0.571119", "0.57085055", "0.56948835", "0.56937116", "0.5680016", "0.5678191", "0.56739664", "0.5666438", "0.56637114", "0.564861", "0.56460685", "0.5643547", "0.5643305", "0.5639721", "0.5639322", "0.5634357", "0.56317025", "0.56252635", "0.56131154", "0.5610096", "0.56075174", "0.560631", "0.56023544", "0.55972975", "0.5596504", "0.55953515" ]
0.0
-1
user enters an integer and the corresponding link is deleted
def delete_registry(self) -> None: self.view_registry() links = self.load_links()[0] try: url_to_delete = links[abs(int(input("Enter no. of URL to delete: ")))] except IndexError: print('Item not found - Nothing was deleted') return with open(URL_FILE, 'w') as f: for link in links: if(link != url_to_delete): f.write(link+'\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete():\n id_num = int(input('Enter the ID number of the item you wish to delete\\n'))\n db_actions.remove(id_num)", "def unlink(self, link_id):", "def remove_link():", "def delete():", "def delete_secret_link(link_id):\n\n Secret_Link.objects.filter(link_id=link_id).delete()", "def delete_entry():\n u_id = request.args(0) or redirect(URL('moderation', 'new_entries'))\n db(db.lioli_main.unique_id == u_id).delete()\n redirect(URL('new_entries'))\n return dict()", "def delete(self):\n if jwthandler.authorize_action(self, 1) == False:\n return None\n\n userdata = jwthandler.decode_userdata(self.request.headers[\"Authorization\"])\n\n body_categories = {\"link_id\": 1}\n link_dict = errorutil.check_fields(self.request.body.decode(), body_categories, self)\n\n if link_dict == False or linkutil.delete_link(link_dict[\"link_id\"], self) == False:\n return None\n\n formatted_message = loggerhandler.form_delete_message_dictionary(userdata, \n \"link\", \n link_dict[\"link_id\"])\n\n\n loggerhandler.log_message(\"delete\", formatted_message)\n\n self.write({\"message\":\"Success\"})", "def delete_entry(entry):\n\n # Llama a view_entries despues de haber añadido nueva funcionalidad\n\n response = input(\"Estás seguro? [yN]\").lower()\n\n if response == 'y':\n entry.delete_instance()\n print('Entrada borrada.')", "def delete_link(self):\n self.link_layout.links_list.remove_widget(self)\n self.link_layout.links.remove(self.text)\n utils.update_data()\n utils.data[self.link_layout.parent_screen.name]['links'] = self.link_layout.links\n utils.save_project_data(utils.data[self.link_layout.parent_screen.name],\n f\"{utils.data[self.link_layout.parent_screen.name]['proj_path']}/project_data.json\")", "def delete_inventory():\r\n strIDDel = input('Which ID would you like to delete?: ').strip()\r\n while ValueError:\r\n try: \r\n int(strIDDel)\r\n break\r\n except ValueError:\r\n strIDDel = input('Error: ID must be numeric. 
Enter ID: ').strip()\r\n return strIDDel", "def delete(self, _id):", "def delete(self, something):\n if something == Concept:\n number = 0\n target_list = self.concept_list\n elif something == Subcategory:\n number = 1\n target_list = self.concept_list\n elif something == Relation:\n number = 2\n target_list = self.relation_list\n if target_list.currentIndex().isValid():\n something = target_list.selectedItems()[0].data(Qt.UserRole)[number]\n self.db.delete(something)\n self.search()", "def DeleteRow(self, entry):\n for a_link in entry.link:\n if a_link.rel == 'edit':\n return self.Delete(a_link.href)", "def delete_link(update: Update, context: CallbackContext):\n query = update.callback_query\n link_id = query.data.split(\"delete:\")[1]\n\n with db.connect() as connection:\n link = db.get_link(connection, link_id)\n\n context.bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=f'You are about to delete a link to \"{link.title}\" ({link.url})\\nAre you sure?',\n reply_markup=keyboards.link_delete(link),\n )\n\n query.answer()", "def deleterecord(phones,username,phonenum):\r\n if username in phones:\r\n del phones[username]\r\n else:\r\n raise ValueError(\"This username are not exist\")", "def delete(url, data=None, **_):\n # Checks input parameters\n assert '/process/%s' % dummy_id in url in url", "def delete_link(self, word):\n meaning = self.word2meaning[word]\n print(str(self.unique_id) + \" forgot \" +\n str(word) + \" for \" + str(meaning))\n del self.word2meaning[word]\n del self.meaning2word[meaning]\n del self.wordsuccess[word]\n\n # If the agent was the only one using the word, delete the word\n if len(self.model.vocabulary[meaning][word]) == 1:\n del self.model.vocabulary[meaning][word]\n # Else simply remove the agent\n else:\n self.model.vocabulary[meaning][word].remove(self.unique_id)", "def delete():\n global num_carte, code_secret, montant\n length_card = len(card_num_entry.get())\n if length_card in [5, 10, 15]:\n num_carte = num_carte[:-2]\n card_num.set(num_carte)\n else:\n num_carte = num_carte[:-1]\n card_num.set(num_carte)\n\n code_secret = code_secret[:-1]\n code.set(code_secret)\n\n montant = str(montant)[:-1]\n amount.set(montant)", "def unlink(address):", "async def remove(self, ctx: Context, url: str):\n try:\n index = int(url)\n if 0 < index <= len(self.urls):\n removed_url = self.urls[index - 1]\n del self.urls[index - 1]\n self.write_vac()\n await ctx.channel.send('Removed <{}> from checker.'.format(removed_url))\n else:\n await ctx.channel.send('{} is not a valid index.'.format(index))\n except ValueError:\n if url in self.urls:\n self.urls.remove(url)\n self.write_vac()\n await ctx.channel.send('Removed <{}> from checker.'.format(url))\n else:\n await ctx.channel.send('<{}> is not registered to checker.'.format(url))", "def remove_link(request, ck, link_name):\n\n refresh_template = request.session[constants.ACTUAL_TEMPLATE]\n\n links = request.session[constants.ADD_LINKS]\n if '://' not in link_name:\n link_name = link_name.replace(':/', '://')\n link = next(el for el in links if el.link == link_name)\n if ck != \"0\":\n coding = get_object_or_404(CodingProject, id=ck)\n\n # TODO: Review this \n us = get_user(request)\n user = us\n\n # Coding must have been created by the current user and\n if coding.coder != user.id:\n raise Http404\n\n if coding.links.filter(link=link_name):\n cache_list = request.session[constants.REM_LINKS]\n cache_list.append(link)\n\n links.remove(link)\n 
request.session[constants.ADD_LINKS] = links\n\n # TODO: Centralize this?\n return HttpResponseRedirect(refresh_template)", "def management_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n management_reference = get_object_or_404(Management, id=id,company=company)\n\n #deletes the view and redirects to the page.\n management_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def delete(request):\n if request.method == \"POST\":\n Books.objects.get(isbn=request.POST['delete_book']).delete()\n return redirect('libros:home')", "def delete_menu():", "def delete_entry(self, index, url, info):\r\n\r\n # Retrive all items fom url\r\n info = requests.get(url, headers=self.headers).json()['results']\r\n idd = 0\r\n # Find the index entry\r\n for entry in info:\r\n if int(entry['id']) == index:\r\n idd = entry['id']\r\n \r\n # If the schedule id doesn't exist\r\n if idd == 0:\r\n return False\r\n # Delete request\r\n return self.check_delete(url, info, index)", "def delete_model(self, request, obj):\n obj.post.likeNumDreacase()\n obj.delete()", "def delete_by_index(self, index):\n cur = self.head\n length=self.get_length()\n if type(index) is int:\n if self.is_empty():\n return\n else:\n if index > length:\n # The index value is out of range and prompts and exits\n print(\"Index is out of range.\")\n return\n else:\n if index == 0:\n if cur.next == None:\n self.head = None\n else:\n cur.next.prev = None\n self.head = cur.next\n return\n else:\n while (index) > 0:\n cur = cur.next\n index -= 1\n\n # Point the next node of cur to the next node of cur\n cur.prev.next = cur.next\n # Point the prev of the next node of cur to the previous node of cur\n cur.next.prev = cur.prev\n length -= 1\n return\n else:\n print(\"Index value is not int.\")\n return", "def delete(self, *args, **kwargs):\n return 0", "def deleteRecord(self):\n selectedData = self.controller.chooseRecord(\"Enter the record number: \") - 1\n if selectedData >= (len(self.dto.getRecord())):\n print(\"Please choose number within the number of records.\")\n else:\n print(self.dto.getRecord()[selectedData].__dict__)\n if self.controller.confirmMsg(\"Do you want to delete this data? (y/n): \") == \"y\":\n self.dto.getRecord().remove(self.dto.getRecord()[selectedData])\n print(\"Record deleted.\")", "def delete_model(self, request, obj):\n obj.post.comNumDrease()\n obj.delete()", "def acquisition_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n acquisition_reference = get_object_or_404(Acquisition, id=id,company=company)\n\n #deletes the view and redirects to the page.\n acquisition_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def del_node(node, delnum):\n pass", "def delete(self):\n ...", "def delete_donation():\n print()\n print('Welcome to the Delete a a Donor Donation Menu')\n print()\n donor_name = get_name_input()\n single_donor_print(donor_name)\n print('See the donation you want to delete in the report? 
Follow the prompts to enter donation to delete')\n donation_delete = check_number_input()\n print()\n delete_donation_from_db(donor_name,donation_delete)\n print('Donation has been deleted. See report below for verification')\n single_donor_print(donor_name)", "def relink(self, link_id):", "def award_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n award_reference = get_object_or_404(Award, id=id,company=company)\n\n #deletes the view and redirects to the page.\n award_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def close_account(self, conn, number):\n sql = \"DELETE FROM card WHERE number=?\"\n c = conn.cursor()\n c.execute(sql, (number,))\n conn.commit()\n self.menus()", "def delete(id):\n get_autor(id)\n try:\n db.insert_bd('DELETE FROM autor WHERE id = %d' % id)\n return redirect(url_for('autor.index'))\n except:\n return render_template('404.html')", "def remove_link(self, dest):\n for i, link in enumerate(self.runscript.links):\n if link[1] == dest:\n del self.runscript.links[i]\n break", "async def command_del(ctx, *args):\r\n if len(args) != 1:\r\n await ctx.send('Del takes 1 parameter: id')\r\n return\r\n\r\n try:\r\n docid = int(args[0])\r\n except:\r\n await ctx.send(f'Rem expects a number: id')\r\n return\r\n\r\n if not db.contains(doc_id=docid):\r\n await ctx.send(f'No rule with id {docid}')\r\n return\r\n\r\n db.remove(doc_ids=[docid])\r\n await ctx.send(f'Removed rule {docid}')\r\n await update_roles(ctx.guild)", "def funding_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n funding_reference = get_object_or_404(Funding, id=id,company=company)\n\n #deletes the view and redirects to the page.\n funding_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def link_delete_callback(self):\n pass", "def command_remove(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = rest[0] if rest else Network.reciprocal(tag)\n try:\n network.unlink(current_name, tag, target, inverse_tag)\n return 'Removed link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"No such link.\"", "def delete(self, value):\n pass", "def delete_entry(self, scenario_info):\n sql = self.delete(\"id\")\n self.cur.execute(sql, (scenario_info[\"id\"],))", "def delete_link(db_object, text=None):\n if text is None:\n text = 'delete'\n return _make_link(db_object.delete_url(), text)", "def del_accomment(request, pk):\n\n comment = get_object_or_404(ActorComment, pk=pk)\n comment.delete()\n actor = comment.actor\n url = '../../' + str(comment.actor.pk)\n return redirect(url)", "def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))", "def delete(self, request, phone):\n attrs = self.flatten_dict(request.POST)\n try:\n endpoint = Endpoint.objects.get(uid__exact=phone, site__name__exact=request.user)\n np = NumberPlan.objects.get(phone_number=phone, site__name__exact=request.user)\n endpoint.enable=False\n np.status=2\n 
endpoint.save()\n np.save()\n # TODO add parking\n return rc.DELETED\n except:\n return rc.NOT_HERE", "def delete_user():", "def delete(self, key):\n\n hi = self.hash_index(key)\n\n # if that hi is empty ignore\n # if self.storage[hi] is None:\n # print(\"WARNING: no key\")\n # return\n\n current = self.storage[hi]\n prev = self.storage[hi]\n while current and current.key != key:\n prev = current\n current = current.next\n\n if (current and current.key == key):\n # if its the first link in the list\n if (current == self.storage[hi]):\n self.storage[hi] = current.next\n else:\n prev.next = current.next\n\n self.numberOfItems -= 1\n else:\n print(\"WARNING: no key\")\n\n self.calculateLoad()", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def delete(request, content_type, object_id):\n user = request.user\n content_type_object = ContentType.objects.get(id = content_type)\n node = content_type_object.model_class().objects.get(id = object_id)\n community_wiki.delete_content(node)\n \n redirect_url = reverse('content-list-redirect', args=[content_type_object.id])\n return http.HttpResponseRedirect(redirect_url)", "def crawlerDelete(crawlerid):\n sclogic.crawlerDelete(crawlerid)", "def do_del_item(self, arg):\n try:\n del_item = arg[\"<list_name>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n del_item_str = \" \".join(del_item)\n print(del_item_str)\n elif choice == \"id\":\n del_item_str = int(\" \".join(del_item))\n print (del_item_str)\n app.ToDoApp.to_delete_item(del_item_str)\n print (\"Item deleted\")\n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def remove_bookmark(request):\r\n \r\n if request.GET:\r\n if request.GET.get('path'):\r\n next = urllib.unquote(request.GET.get('path'))\r\n try:\r\n bookmarkitem = BookmarkItem.objects.get(bookmark__user=request.user, link=urllib.unquote(request.GET.get('path')))\r\n bookmarkitem.delete()\r\n msg = ['success', 'A pagina foi removida dos Atalhos']\r\n except BookmarkItem.DoesNotExist:\r\n msg = ['error', 'A pagina não pode ser removida dos Atalhos']\r\n else:\r\n msg = ['error', 'A pagina não pode ser removida dos Atalhos']\r\n next = ADMIN_URL\r\n else:\r\n msg = ['error', 'A pagina não pode ser removida dos Atalhos']\r\n \r\n # MESSAGE & REDIRECT\r\n if not request.session.get('grappelli'):\r\n request.session['grappelli'] = {}\r\n request.session['grappelli']['message'] = msg\r\n request.session.modified = True\r\n return HttpResponseRedirect(next)", "def office_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n #deletes the view and redirects to the page.\n office_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def delete_line(command):\n try:\n if len(command) > 1:\n my_file.delete_num(int(command[1]))\n else:\n my_file.delete_num()\n except FileNotFoundError:\n print('No file has been read yet')\n except ValueError:\n print('The line number is not an integer')\n except IndexError:\n print('Line number is out of range')", "def delete_data(request, result_id):\n result = TestResult.objects.get(id=result_id)\n result.delete()\n gun = result.bullet.gun\n return HttpResponseRedirect(reverse('gun', args=[gun.id]))", "def 
delete_meal():", "def delete_related(request, scheme_id):\n scheme = get_object_or_404(ShamirSS, pk=scheme_id)\n documents = get_list_or_404(Document, scheme=scheme)\n form = DeleteRelatedForm()\n if request.method == 'POST':\n Document.objects.filter(scheme=scheme).delete()\n return redirect('/s')\n else:\n return render(request, 'shared_secret/del_related.html', {\n 'scheme': scheme,\n 'documents': documents,\n 'form': form\n })", "def del_awcomment(request, pk):\n comment = get_object_or_404(AwardComment, pk=pk)\n comment.delete()\n award = comment.award\n url = '../../' + str(comment.award.pk)\n return redirect(url)", "def _DeleteAclRule(self, entry):\n\n self.cal_client.Delete(entry.GetEditLink().href)", "def delete_user(id):\n pass", "def delete(d):\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n cur_obj.execute(\"DELETE FROM book WHERE isbn = %s\",(d,))\n conn_obj.commit()\n conn_obj.close()", "def DELETE(uid: str) -> None:\n\n try:\n record_id = int(uid)\n except ValueError as exc:\n raise cherrypy.HTTPError(400, \"Invalid uid\") from exc\n\n deleted_rows = cherrypy.engine.publish(\n \"bookmarks:remove\",\n record_id\n ).pop()\n\n if not deleted_rows:\n raise cherrypy.HTTPError(404, \"Invalid url\")\n\n cherrypy.response.status = 204", "def delete(no):\n\n conn = sqlite3.connect(\"person_database.bd\")\n c = conn.cursor()\n\n # delete a record\n c.execute(f\"DELETE from person_info WHERE oid= \" + str(no))\n\n conn.commit()\n conn.close()", "def delete(id):\r\n get_post(id)\r\n db = get_db()\r\n db.cursor().execute('DELETE FROM novel.post WHERE id = %s', id)\r\n db.commit()\r\n return redirect(url_for('novel.index'))", "def delete(id):\r\n\r\n db = get_db()\r\n b_id = session.get(\"user_id\")\r\n query = \"UPDATE product SET quantity = 0 WHERE product_id = ? AND for_business = ?\"\r\n db.execute(query, (id, b_id,))\r\n db.commit()\r\n return redirect(url_for(\"main.products\"))", "def delete_answer(request, answer_id):\n raise NotImplementedError", "def POST_delete_link_img(self, res, link, name):\r\n # just in case we need to kill this feature from XSS\r\n if g.css_killswitch:\r\n return self.abort(403,'forbidden')\r\n link.del_image(name)\r\n link._commit()\r\n # hide the image and it's container\r\n res._hide(\"img-li_%s\" % name)\r\n # reset the status\r\n res._update('img-status', innerHTML = _(\"Deleted\"))", "def do_del(self, arg):\n try:\n del_list = arg[\"<list_name>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n del_list_str = \" \".join(del_list)\n print(del_list_str)\n elif choice == \"id\":\n del_list_str = int(\" \".join(del_list))\n print (del_list_str)\n app.ToDoApp.to_delete_todo(del_list_str)\n print (\"List deleted\")\n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def deleteEntry (self,event=None):\n \n c = self.c ; box = self.box\n \n # Work around an old Python bug. 
Convert strings to ints.\n items = box.curselection()\n try:\n items = map(int, items)\n except ValueError: pass\n \n if items:\n n = items[0]\n p = self.position[n]\n del self.positionList[n]\n if p in c.visitedList:\n c.visitedList.remove(p)\n self.fillbox()", "def iptables_delete(nid, rid):\n if nid != -1 and (hl.getNode(\"ID\",nid) and hl.getNode(\"ID\",nid)[\"Address\"] != \"self\"):\n url = hl.getNode(\"ID\", nid)[\"Address\"] \n hl.nodePost(url+\"/deleterule/\",{\"ID\" : rid}) \n else:\n hl.removeIPRule(rid)\n \n return redirect(url_for('confirm', confirmed = \"IP Table Rule Deleted!\"))", "def delete(self, request, domain_id):\n domain = get_object_or_404(models.IPBlocklist, id=domain_id)\n domain.delete()\n return redirect(\"settings-ip-blocks\")", "def delete_task(self):\n tasks = self.session.query(self.Table).order_by(self.Table.deadline).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}. {task.deadline.strftime(\"%d %b\")}')\n self.session.query(self.Table).filter(self.Table.id == tasks[int(input())-1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()", "def delete_link(user, mapid, subtopic, url): #in context of a map.\n the_map = get_map(user, mapid)\n links = the_map.subtopics[subtopic].urls\n links.pop(url)\n the_map.subtopics[subtopic].urls = links\n save_map(user, mapid, the_map)", "def delete(self, application_id):", "def delreply(request, post_id):\n if not request.user.is_authenticated():\n return redirect('/login/?next=%s' % request.path)\n else:\n\n reply = Reply.objects.get(id = post_id)\n \n reply.delete() \n return redirect('/home/')", "def cancella_acquistati(request, legahash, astaid, numero=0):\n TrasferimentoRosa.objects.filter(asta_id=astaid).latest('id').delete()\n return HttpResponse(\"Cancellato/i!\")", "def delete(id):\n\tget_post(id)\n\tdb = get_db()\n\tget_post(id)\n\tdb = get_db()\n\tdb.execute('DELETE FROM post WHERE id = ?', (id,))\n\tdb.commit()\n\treturn redirect(url_for('blog.index'))", "def delete(device):\n delete_subject(device)\n return redirect_back('index')", "def delete(short_id):\n try:\n url = Url.get(short_id)\n except:\n return jsonify({\"Error\", \"No Such ID\"})\n\n url.delete()\n return jsonify({\"statusCode\": 301,})", "def remove_directory():\r\n count = 1\r\n # Creates a dict to map user selection numbers to keys of the data dict\r\n deleteDict = {}\r\n print('\\n')\r\n for key in sorted(data):\r\n if not key.startswith('defaultdirectory'):\r\n print(\"{}. {} --> {}\".format(count, key, data[key]))\r\n deleteDict[count] = key\r\n count += 1\r\n print(\"{}. Cancel\".format(count))\r\n selection = input(\"\\nSelect the number of the directory you want to delete:\\n\").strip()\r\n while (not selection.isdigit()) or (int(selection) not in deleteDict) and \\\r\n (int(selection) != count):\r\n selection = input(\"Invalid selection. Select the number of the directory you want \"\r\n \"to delete:\\n\").strip()\r\n selection = int(selection)\r\n if selection != count:\r\n print(\"\\n*** {} has been deleted\".format(deleteDict[selection]))\r\n del data[deleteDict[selection]]\r\n save_json()", "def delete(self, record):\n\n s = record.split()\n if len(s) != 3:\n sys.stderr.write('The format of the input should be like this: meal breakfast -50.\\\n \\nFail to delete a record.\\n')\n elif self._records.count(record) > 1:\n try:\n d = int(input(f'Which line of the record \"{record}\" is going to be deleted? 
'))\n testlist = []\n for i, v in enumerate(self._records):\n if v == record:\n testlist.append(i+1) # testlist contains the records that is identical to the input\n assert d in testlist\n except ValueError:\n sys.stderr.write('Invalid input. Should be an integer.\\nFail to delete a record.\\n')\n except AssertionError:\n sys.stderr.write(f'Invalid input number. No record of \"{record}\" in line {d}.\\\n \\nFail to delete a record')\n else:\n del(self._records[d-1])\n elif self._records.count(record) == 1:\n self._records.remove(record)\n else:\n sys.stderr.write(f'There\\'s no record with \"{record}\".\\nFail to delete a record.\\n')", "def delete_entry(entry_id):\n\n # grabs the specific entry id\n entry = Entry.query.get(entry_id)\n\n # grabs the user id in the session\n user_id = session.get(\"user_id\")\n\n # prevents the public for accessing user specific information\n if not session.get(\"user_id\") or session[\"user_id\"] != user_id:\n return redirect(\"/\")\n\n # removes an entry from the database\n db.session.delete(entry)\n db.session.commit()\n\n # flash a message to show confirmation for the user\n flash(\"You have successfully deleted an entry!\")\n\n return redirect(f\"all-entries/{user_id}\")", "def delete(self, request, url_id, *args, **kwargs):\n url_instance = self.get_object(url_id, request.user.id)\n if not url_instance:\n return Response(\n {\"detail\": \"Object with url id does not exists\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n url_instance.delete()\n return Response(\n {\"detail\": \"Object deleted!\"}, status=status.HTTP_200_OK\n )", "def cmd_delete_job():\r\n id = request.form.get('id', \"\")\r\n confirm = request.form.get(\"confirm\", \"\")\r\n if confirm != \"DELETE\":\r\n flash(f\"Contact '{id}' NOT deleted. Please enter DELETE in the confirm field.\")\r\n return redirect(url_for('main.jobs'))\r\n \r\n index = get_job_by_id(id)\r\n Job.query.filter(Job.id == id).delete()\r\n db.session.commit()\r\n\r\n\r\n if index != None:\r\n flash(f\"Job '{id}' was succesfully deleted!\")\r\n return redirect(url_for('main.jobs'))\r\n else:\r\n flash(f\"Job '{id}' was not found\")\r\n return redirect(url_for('main.jobs'))", "def delete_key(uid):\n if request.method == 'POST':\n hl.deleteUser(uid)\n return redirect('/users')", "def numeros_by_id_delete_view(request):\n # Check authorization\n if not Utils.has_permission(request, request.registry.settings['affaire_numero_edition']):\n raise exc.HTTPForbidden()\n\n settings = request.registry.settings\n projet_id = int(settings['numero_projet_id'])\n abandonne_id = int(settings['numero_abandonne_id'])\n\n # Get numero by id\n id = request.matchdict['id']\n query = request.dbsession.query(Numero).filter(\n Numero.id == id).first()\n\n if query:\n if query.etat_id == projet_id:\n query.etat_id = abandonne_id\n elif query.etat_id == abandonne_id:\n query.etat_id = projet_id\n\n return Utils.get_data_save_response(Constant.SUCCESS_DELETE.format(Numero.__tablename__))", "def del_number(self, number):\r\n if number in self.numbers:\r\n self.numbers.remove(number)", "def admindelete(object, id):\n db = get_db()\n execute_str = 'DELETE FROM ' + object + ' WHERE id = ' + str(id)\n db.execute(execute_str)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))", "def delete(request, todo_id):\n\n todo = get_object_or_404(Todo, pk=todo_id)\n todo.delete()\n\n return redirect('index')", "def delete_entry(key):\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n if key in db:\n confirm = input(\"Delete 
{name} [y/n]: \".format(name=key))\n if confirm.lower() == 'y':\n print(\"Deleting entry ..... {name}\\n\".format(name=key))\n del db[key]", "def delete_song(self):\r\n song_id = tuple(input(\"Give the melody id to be deleted:\\t\"))\r\n sql = \"SELECT file_title, form FROM songs WHERE id = %s\" # Check existence of song with given ID\r\n self.cursor.execute(sql, song_id)\r\n result = self.cursor.fetchall()\r\n if len(result) > 0:\r\n path = self.p_storage + \"/\" + result[0][0] + \".\" + result[0][\r\n 1] # Find path of song by appending the name and format to the storage directory path\r\n os.remove(path) # Remove song from directory\r\n sql = \"DELETE FROM songs WHERE id = %s\" # Delete song from database\r\n self.cursor.execute(sql, song_id)\r\n self.cnx.commit()\r\n print(self.cursor.rowcount, \"record(s) deleted\")\r\n else:\r\n print(\"Give a valid id...\")", "def test_delete_link_no_resources(self):\n g = groups.get_by_name(\"fifth group\")\n \n self.open_url('/group/list')\n \n deletelink = self.wd.find_element(By.ID, \"delete-link-{0}\".format(g.id))\n deletelink.click()\n \n alert = self.wd.switch_to_alert()\n self.assertEqual(\"Are you sure you want to remove group {0} (id={1})\".format(g.name, g.id), alert.text)\n alert.accept()\n \n self.assert_notification(\"Group deleted: {0} (id={1})\".format(g.name, g.id))\n self.assert_not_in_list_table(g.name)", "def delete_field(self, url, data):\r\n while data != []:\r\n for field in data:\r\n if 'id' in field:\r\n requests.delete(url + str(field['id']), headers=self.headers)\r\n data.clear()\r\n data = requests.get(url, headers=self.headers).json()['results']", "def _delete():\n\tquery = myTaskSession.query(WorkToolkitDB.db.Task)\n\n\tIDStr = myOpt.id\n\tIDs = re.split('\\s*,\\s*', IDStr)\n\n\tif len(IDs) == 0:\n\t\tprint('ERR: no deleting id input')\n\t\treturn 1\n\n\tfor ID in IDs:\n\t\tmyTask = query.get(ID)\n\t\tmyTaskSession.delete(myTask)\n\n\t\n\tmyTaskSession.commit()\n\n\treturn 0", "def competitors_delete(request, slug,id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n competitors_reference = get_object_or_404(Competitors, id=id,company=company)\n\n #deletes the view and redirects to the page.\n competitors_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def delete(self):\n pass" ]
[ "0.7272275", "0.7188536", "0.6581015", "0.6359355", "0.63263273", "0.62977743", "0.6175496", "0.6145932", "0.6075425", "0.607078", "0.6014249", "0.59680676", "0.59213567", "0.59148896", "0.5872791", "0.5862901", "0.5848978", "0.58246154", "0.5816256", "0.58000165", "0.57922125", "0.5789605", "0.5779743", "0.577037", "0.5767772", "0.5743014", "0.5734665", "0.5719346", "0.5715513", "0.5714472", "0.57066363", "0.5699618", "0.5671158", "0.56574506", "0.5648838", "0.5641116", "0.5637347", "0.56338453", "0.5624599", "0.56241447", "0.56162435", "0.56116056", "0.5601081", "0.5579467", "0.55781084", "0.557555", "0.5573007", "0.5553591", "0.55528444", "0.55511624", "0.5546994", "0.5543394", "0.55353695", "0.5530355", "0.55260736", "0.55187625", "0.5518175", "0.5516404", "0.5514569", "0.5509472", "0.5505195", "0.5504412", "0.54949355", "0.549008", "0.54879814", "0.54858327", "0.5480313", "0.5476326", "0.5468403", "0.54605895", "0.5459529", "0.545916", "0.5451534", "0.5446775", "0.5446186", "0.54447824", "0.54432046", "0.54399174", "0.5434339", "0.5429967", "0.54282284", "0.5422928", "0.5419015", "0.54167485", "0.54152644", "0.54092884", "0.54086226", "0.54038155", "0.54023165", "0.5402244", "0.5399221", "0.53988284", "0.5394599", "0.53935885", "0.5390812", "0.53827906", "0.53695863", "0.5368599", "0.5364259", "0.536355" ]
0.71173656
2
perform_destroy is used to perform a logical delete
def perform_destroy(self, instance):
    instance.is_active = not instance.is_active
    instance.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perform_destroy(self, instance):\n pass", "def do_destroy(self, arg):\n obj = self.verify(arg, 2)\n if obj:\n del storage.all()[obj]\n storage.save()", "def destroy(self):", "def destroy(self):", "def destroy(self):", "def delete():", "def _destroy(self):", "def post_destroy(self) -> Any:\n raise NotImplementedError", "def __del__(self):\n self.execute()", "def pre_destroy(self) -> Any:\n raise NotImplementedError", "def delete(self):\n ...", "def do_destroy(self, arg):\n jail_destroy('destroy', arg)", "def destroy(self):\n pass", "def destroy(self):\n pass", "def destroy(self):\n pass", "def destroy(self):\n pass", "def _delete_job(self, job):", "def destroy(self):\n\n pass", "def destroy():\n pass", "def destroy():\n pass", "def do_destroy(self, arg):\n args = shlex.split(arg)\n stored_objects = models.storage.all()\n\n if self.basic_errs(args):\n '''check if instance exists'''\n instance = self.check_instance(args[0], args[1], stored_objects)\n if instance:\n \"\"\"delete from FileStorage.__objects\"\"\"\n del stored_objects[instance]\n \"\"\"overwrite the new data to file.json\"\"\"\n models.storage.save()", "def destroy(self):\n pass # Nothing for now", "def main(self):\n self.delete_details()\n self.delete_cleaned()\n self.vacuum()", "def _post_delete(self, instance, **kwargs):\n pk_name = instance._meta.pk.name\n for key in self.cache_fields:\n if key in ('pk', pk_name):\n continue\n # remove pointers\n cache.delete(self._get_from_cache_key(**{key: getattr(instance, key)}))\n # remove actual object\n cache.delete(self._get_from_cache_key(**{pk_name: instance.pk}))", "def delete(self):\n self.__generates -= 1\n if self.__generates < 0:\n self.notify.debug('DistributedObjectAI: delete() called more times than generate()')\n if self.__generates == 0:\n # prevent this code from executing multiple times\n if self.air is not None:\n # self.doId may not exist. The __dict__ syntax works around that.\n assert self.notify.debug('delete(): %s' % (self.__dict__.get(\"doId\")))\n\n #if not self._DOAI_requestedDelete:\n # # this logs every delete that was not requested by us.\n # # TODO: this currently prints warnings for deletes of objects\n # # that we did not create. 
We need to add a 'locally created'\n # # flag to every object to filter these out.\n #\n # DistributedObjectAI.notify.warning(\n # 'delete() called but requestDelete never called for %s: %s'\n # % (self.__dict__.get('doId'), self.__class__.__name__))\n #\n # # print a stack trace so we can detect whether this is the\n # # result of a network msg.\n # # this is slow.\n # from direct.showbase.PythonUtil import StackTrace\n # DistributedObjectAI.notify.warning(\n # 'stack trace: %s' % StackTrace())\n\n self._DOAI_requestedDelete = False\n\n self.releaseZoneData()\n\n # Clean up all the pending barriers.\n for barrier in self.__barriers.values():\n barrier.cleanup()\n self.__barriers = {}\n\n # DCR: I've re-enabled this block of code so that Toontown's\n # AI won't leak channels.\n # Let me know if it causes trouble.\n ### Asad: As per Roger's suggestion, turn off the following\n ### block until a solution is thought out of how to prevent\n ### this delete message or to handle this message better\n # TODO: do we still need this check?\n if not getattr(self, \"doNotDeallocateChannel\", False):\n if self.air:\n self.air.deallocateChannel(self.doId)\n self.air = None\n\n self.parentId = None\n self.zoneId = None\n self.__generated = False", "def perform_destroy(self, instance):\n self.object.comments -= 1\n self.object.save()\n instance.delete()", "def destroy(self):\n raise NotImplementedError()", "def destroy_check(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self, obj):", "def do_destroy(self, arg):\n arg = arg.split()\n try:\n args = arg[0] + \".\" + arg[1]\n except:\n pass\n objects = storage.all()\n if len(arg) is 0:\n print(\"** class name missing **\")\n elif len(arg) == 1 and arg[0] in self.dict.keys():\n print(\"** instance id missing **\")\n elif arg[0] not in self.dict.keys():\n print(\"** class doesn't exist **\")\n elif args not in objects:\n print(\"** no instance found **\")\n else:\n del objects[args]\n storage.save()", "def Destroy(self):\n raise NotImplementedError", "def delete(self, *args, **kwargs):\n return 0", "def perform_destroy(self, instance):\n instance.is_active = False\n instance.save()", "def deleteOrDelay(self):\n self.delete()", "def deleteTask():\n\tmarkOff(isdelete = 1)", "def do_destroy(self, *args):\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) != 2:\n print(\"** instance id missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n del dict_objs[key]\n storage.save()\n else:\n print(\"** no instance found **\")", "def doDestroyMe(self, *args, **kwargs):\n self.target_domain = None\n self.renew_years = None\n self.sync_contacts = None\n self.sync_nameservers = None\n self.verify_owner = None\n self.latest_domain_info = None\n self.destroy()", "def destroy(self):\n return True", "def __del__(self):\n self.destroy()", "def perform_destroy(self, instance):\n instance.subscription_set.filter(owner=self.request.user).delete()", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def cleanup(self, *args, **kwargs):", "def test_delete_run(self):\n pass", "def _queue_delete(self, queue):\n\n 
queue.delete()", "def _delete(performer):\n if not isinstance(performer, helper._AelObjectPerformer):\n raise Exception('Invalid delete performer type')\n\n try:\n util.delete(\n obj=performer.getObject(), testmode=performer.isInTestMode()\n )\n except Exception as e:\n raise Exception('Failed to delete %s: %s' % (performer._name, str(e)))\n\n return", "def delete(self):\n raise NotImplementedError", "def after_delete(self, obj, st):\n pass", "def force_delete(self):\n self.manager.force_delete(self)", "def force_delete(self):\n self.manager.force_delete(self)", "def onDestroy(self):\n pass", "def do_destroy(self, arg):\n if len(arg) == 0:\n print(\"** class name missing **\")\n return\n coms = tuple(arg.split())\n if coms[0] not in self.cls:\n print(\"** class doesn't exist **\")\n elif len(coms) < 2:\n print(\"** instance id missing **\")\n else:\n obj = coms[0] + \".\" + coms[1]\n if obj not in storage.all().keys():\n print(\"** no instance found **\")\n else:\n del storage.all()[obj]\n storage.save()", "def do_destroy(self, args):\n args = shlex.split(args)\n dicti = storage.all()\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif \"{}.{}\".format(args[0], args[1]) in dicti:\n dicti.pop(\"{}.{}\".format(args[0], args[1]))\n storage.save()\n else:\n print(\"** no instance found **\")", "def __delete__(self):\n pass", "def doDestroyMe(self, *args, **kwargs):\n self.target_domain_name = None\n self.auth_info = None\n self.latest_domain_info = None\n self.destroy()", "def __del__(self):\r\n self.cleanup()", "def destroy(self):\r\n self.__destroy()", "def soft_delete(self, instance):\n self.destroy(instance)", "def __on_delete(self):\n self.image.delete()", "def __on_delete(self):\n self.image.delete()", "def purge(self):\n pass", "def delete(self, *args, **kwargs):\n self.flush_from_cache()\n self._is_deleted = True\n super().delete(*args, **kwargs)", "def delete(self):\n return super(Task, self).delete(None)", "def delete(self, identifier):\n self.get(identifier)\n conn = self.get_connector()\n cursor = conn.cursor()\n\n query = \"delete from {0} where {2}={1}\".format(\n self.ressource_config[\"table\"],\n identifier,\n self.model.pk_field.name)\n try:\n cursor.execute(query)\n except sqlite3.IntegrityError, e:\n message = \"\"\n if \"foreign\" in e.message:\n message = \"\"\"another ressource depends on this\n object. 
Cloud not delete before all ressources\n depending on it are also deleted\"\"\"\n\n raise BadRequest(message)\n\n conn.commit()\n conn.close()", "def __del__(self):\n if not self._getter and conf.PROGRESSBAR_DESTROY_ON_EXIT:\n self._db_obj.delete()", "def delete(self):\n self.manager.delete(self)", "def destroy(self, userdata):\r\n pass", "def __del__(self) -> None:\n self.delete()", "def do_delete(self, arg):\n \treturn False", "def destroy(self):\n raise NotImplementedError('You must implement the destroy() method '\n 'yourself!')", "def finalizer():\n for resource_type in pods, pvcs, storageclasses, secrets:\n for resource in resource_type:\n resource.delete()\n resource.ocp.wait_for_delete(resource.name)\n if pools:\n # Delete only the RBD pool\n pools[0].delete()\n if projects:\n for project in projects:\n project.delete(resource_name=project.namespace)\n project.wait_for_delete(project.namespace)", "def do_destroy(self, arg):\n arg_list = arg.split(\" \") if type(arg) == str else arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n if len(arg_list) < 2:\n print(\"** instance id missing **\")\n return\n key = arg_list[0] + \".\" + arg_list[1]\n if key in storage.all():\n del storage.all()[key]\n storage.save()\n return\n print(\"** no instance found **\")", "def do_destroy(self, arg):\n args = arg.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n elif len(args) < 2 and args[0] in self.class_dict:\n print(\"** instance id missing **\")\n return\n elif len(args) < 2:\n print(\"** class name missing **\")\n return\n\n object_dict = storage.all()\n if args[0] in self.class_dict:\n for full_key in object_dict:\n key = full_key.split(\".\")\n if key[1] == args[1]:\n del object_dict[full_key]\n storage.save()\n return\n print(\"** no instance found **\")\n else:\n print(\"** class doesn't exist **\")", "def post_delete_access_attempt(self, instance, **kwargs):", "def delete(self, *args, **kwargs):\n self.image.storage.delete(self.image.name)\n delete(self.image)\n super().delete(*args, **kwargs)", "def _Delete(self):\n pass", "def delete(self, obj):\n raise NotImplementedError", "def judge_destroy(self):\n pass", "def prepareToDelete(self):\n pass", "def delete(self):\n self.manager.delete(self.name)", "def delete(self):\n self.manager.delete(self.name)", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):", "def delete(self, *args, **kwargs) -> Any:\n pass", "def beforeDelete(self):", "def delete(self, obj=None):\n pass", "def do_destroy(self, args):\n args = args.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n if args[0] not in HBNBCommand.class_check:\n print(\"** class doesn't exist **\")\n return\n\n all_objs = storage.all()\n key = args[0] + '.' 
+ args[1]\n if key in all_objs:\n all_objs.pop(key)\n storage.save()\n else:\n print(\"** no instance found **\")", "def delete(self, _id):", "def do_destroy(self, arg):\n args = shlex.split(arg)\n if len(args) == 0:\n print(\"** class name missing **\")\n elif args[0] in class_type:\n if len(args) > 1:\n key = args[0] + \".\" + args[1]\n if key in models.storage.all():\n models.storage.all().pop(key)\n models.storage.save()\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")", "def delete(self):\n raise NotImplementedError()", "def delete(self):\n raise NotImplementedError()", "def __del__(self):\n self.finish()", "def can_fast_delete(self, *args, **kwargs):\n return False", "def delete(self):\n\n raise NotImplementedError()" ]
[ "0.7563987", "0.6974102", "0.69603705", "0.69603705", "0.69603705", "0.68129027", "0.67679864", "0.6733365", "0.67058355", "0.6675282", "0.6674819", "0.66600496", "0.66232634", "0.66232634", "0.66232634", "0.66232634", "0.65740126", "0.6546043", "0.6532895", "0.6532895", "0.6471276", "0.64684165", "0.6442513", "0.64347124", "0.6396978", "0.6380684", "0.63653576", "0.6357649", "0.6352008", "0.6352008", "0.6352008", "0.6352008", "0.6293862", "0.6282352", "0.62690556", "0.62218994", "0.6216835", "0.62095124", "0.6197164", "0.6187801", "0.6179276", "0.6178347", "0.6176019", "0.6172301", "0.6163763", "0.6163763", "0.6151269", "0.6140792", "0.61335343", "0.61171025", "0.6103702", "0.6103206", "0.60961103", "0.60961103", "0.6092778", "0.609246", "0.6088671", "0.6070594", "0.6068217", "0.60679597", "0.6060587", "0.60578", "0.604133", "0.604133", "0.6033328", "0.6028802", "0.60263497", "0.6013492", "0.6010552", "0.60023296", "0.5994209", "0.5987181", "0.5970371", "0.5967683", "0.59658116", "0.59649277", "0.5959542", "0.5952426", "0.5950957", "0.59480023", "0.5941434", "0.5936087", "0.5930062", "0.59226185", "0.59226185", "0.5918982", "0.5918982", "0.5918982", "0.5904416", "0.58945084", "0.5894482", "0.58905065", "0.5879762", "0.5879528", "0.5878632", "0.5878632", "0.58748245", "0.58719695", "0.58706725" ]
0.6041425
63
This function sleeps for 1 second, then prints out the current time. Notice that this is a task that "blocks" execution of other code
def sundial():
    time.sleep(1)
    print(f"Sundial: {dt.now()}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task_display_funny_time():\n print(\"funny time is %s\" % datetime.datetime.now())\n logger.info(\"Hurray its working\")", "def print_time(s, start_time):\n print(\"%s, time %ds, %s.\" % (s, (time.time() - start_time), time.ctime()))\n sys.stdout.flush()\n return time.time()", "def sleep_for(timeToSleep):\r\n time.sleep(timeToSleep)", "def sleep(seconds):\n time.sleep(seconds)", "def sleep(seconds):\n time.sleep(seconds)", "def waitWithComments(sleep_time, segment=60):\n print(\"\\t%s - %s seconds to go...\" % (datetime.now(), sleep_time))\n while sleep_time > segment:\n time.sleep(segment) # sleep in increments of 1 minute\n sleep_time -= segment\n print(\"\\t%s - %s seconds to go...\" % (datetime.now(), sleep_time))\n time.sleep(sleep_time)", "def timer(description):\n t0 = time.time()\n yield\n print(f'[{description}] done in {time.time() - t0:.0f} s')", "def only_sleep():\n print(get_process_thread_info())\n time.sleep(1)", "def sleep(seconds):\r\n time.sleep(seconds)", "def run(seconds=1):\n time.sleep(seconds)\n print('Slept for ',seconds,' seconds')", "def time_thread(self):\n while self.time > 0:\n t.sleep(1)\n self.time -= 1\n self.end_round(\"Time is up\")", "async def sleep(cls, delay: float) -> None:", "def take_action(duration_length):\r\n print(\r\n f'Sleeping for {duration_length} second(s)...and now it is {strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime())}/')\r\n time.sleep(duration_length)\r\n return f'Wake up after sleeping for {duration_length} second(s)... and now it is {strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime())}/'", "def timer():\n start = time.time()\n # Send control back to the context block\n yield\n end = time.time()\n print('Elapsed: {:.2f}s'.format(end - start))", "def sleep(sleep_time=0.250):\n time.sleep(sleep_time)", "def timer():\n start = time.time()\n\n yield\n\n end = time.time()\n\n print('Elapsed: {:.2f}s'.format(end - start))", "def sleep(self):\n self.sleep_after(1) # Can't be 0, that means 'don't sleep'", "async def current_time_handler():\n\n return time_millis()", "def timer(name):\n t0 = time.time()\n print('[%s] in progress' % name)\n yield\n print('[%s] done in %.0f s' %(name, time.time() - t0))", "def sleep(secs=1.0):\n time.sleep(secs)", "def main():\r\n\r\n print\r\n print '** Demonstrating new Timer print statement:'\r\n\r\n with Timer('Test Timer') as tm:\r\n current_second = 0\r\n while tm.current_result() < 5:\r\n if current_second != int(tm.current_result()):\r\n print '{s} second(s) elapsed.'.format(s=int(tm.current_result()))\r\n current_second = int(tm.current_result())\r\n\r\n print\r\n print '** Changing Timer unit and printing last result:'\r\n tm.unit = 'ms'\r\n print tm.last_result()", "def sleep(seconds):\n # After load and initializing the PvAPI Python's built-in 'sleep' function\n # stops working (returns too early). 
The is a replacement.\n from time import sleep,time\n t = t0 = time()\n while t < t0+seconds: sleep(t0+seconds - t); t = time()", "def sleep(seconds):\n\n return Sleep(seconds)", "def print_pause(string, t):\n print(string)\n time.sleep(t)", "def clock( current_time ):\n global D\n number_of_seconds_since_start = int(current_time - D.start_time)\n if D.last_time_printed < number_of_seconds_since_start:\n print \"[Brains] [State:\", D.STATE, \"] time is\", \\\n number_of_seconds_since_start, \"seconds since starting...\"\n D.last_time_printed = number_of_seconds_since_start", "def sleep(self, amount: float):\n time.sleep(amount)", "def sleep_countdown(duration, print_step=2):\n\tfor i in range(duration,0,-print_step):\n\t sleep(print_step)\n\t sys.stdout.write(str(i-print_step)+' ')\n\t sys.stdout.flush()", "def wait_up_to_second(second, time_template=None):\r\n current_second = datetime.datetime.now().second\r\n target_second = int(second)\r\n\r\n if current_second > target_second:\r\n sleep_time = 60 - (current_second - target_second)\r\n else:\r\n sleep_time = target_second - current_second\r\n\r\n if sleep_time:\r\n print('Waiting {} second(s)'.format(sleep_time))\r\n time.sleep(sleep_time)\r\n\r\n if time_template:\r\n return Utils.get_current_time(time_template)", "def timed(name):\n t0 = time.time()\n yield\n t1 = time.time()\n print(\"..%-24s: %8.4f\" % (name, t1 - t0))", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def timer():\n start = time.time()\n # Send control back to the context block\n yield timer()\n end = time.time()\n print('Elapsed: {:.2f}s'.format(end - start))", "def sleep(self):\n time.sleep(0.2)", "def __sleep(self):\n if self.sleep_duration > 0:\n gevent.sleep(self.sleep_duration)\n else:\n self.__warn(\n f\"The average tick took longer than the set tick duration of {self.__tick_duration}. 
\"\n f\"Program is to heavy to run real time\")", "def pass_time(self, t):\n cont = time.time() + t\n while time.time() < cont:\n time.sleep(0)", "def wait(\n t: float,\n f: str,\n ) -> None:\n\n print(\"Waiting for %s...\" % f)\n\n time.sleep(t)\n\n return", "def run(self):\n print \"%s: Start %s (%d s, pid: %d)\" % (self.getName(),time.ctime(),self.time,os.getpid())\n time.sleep(self.time)\n print \"%s: End %s\" % (self.getName(),time.ctime())", "def sleep(interval):\n time.sleep(interval) # pragma: no cover", "def tick():\n\n global time1\n # get the current local time from the PC\n time2 = time.strftime(\"%H:%M:%S\")\n # if time string has changed, update it\n if time2 != time1:\n time1 = time2\n timeLabel.config(text=time2)\n # calls itself every 200 milliseconds\n # to update the time display as needed\n # could use >200 ms, but display gets jerky\n timeLabel.after(200, tick)", "def time_now():\n return time.time()", "def realtime():\n return timemodule.time()", "def _wait(self, time_to_sleep: int, msg: str):\n\n logging.debug(msg)\n if logging.root.level == logging.INFO:\n # Number of times per second to update tqdm\n divisor = 100\n for _ in trange(time_to_sleep * divisor,\n desc=msg):\n time.sleep(1 / divisor)", "def sleep(self):\n return self._sleep", "async def wait_until(dt):\n now = datetime.now()\n await sleep((dt - now).total_seconds())", "async def time(self, ctx):\r\n time = market_time()\r\n await ctx.send(f'It is currently {time.time().strftime(\"%H:%M:%S\")} EDT for the market.')", "def sleep(cls, delay, session):\n print(\"Start sleep for [\", delay, \"]s.[\", session['ip_addr'], \"]\")\n cls.log(1, \"Start sleep for [\", delay,\n \"]s.[\", session['ip_addr'], \"]\")\n time.sleep(delay)\n print(\"End sleep of [\", delay, \"]s.[\", session['ip_addr'], \"]\")\n cls.log(1, \"End sleep of [\",\n delay, \"]s.[\", session['ip_addr'], \"]\")", "def time(self, start_time):\n \n TIME_LIST.append((time.time() - start_time))\n print(\"--- %s seconds ---\" % (time.time() - start_time))", "def time(self):\r\n time = datetime.datetime.now().strftime(\"%I:%M:%S\")\r\n self.speak(\"the current time is\")\r\n self.speak(time)", "def delay():\r\n time.sleep(2)", "async def sleep(self, sleep_time):\n await asyncio.sleep(sleep_time)", "def sleep_sim_time(world, seconds, state_break=[False]):\n start = world.last_time if world.last_time else Time()\n remain = seconds\n\n while remain > 0 and not state_break[0]:\n yield From(trollius.sleep(0.1))\n now = world.last_time if world.last_time else Time()\n remain = seconds - float(now - start)", "def _sleep(self):\n while 1:\n diff = (time.time()-self.lastcall) - self.mindelay\n if diff >= 0: return\n time.sleep(max(-diff/2.0, 0.01))", "def curr_time_millis():\n return 1000 * timeit.default_timer()", "def deepsleep(time_ms: int = None) -> None:", "def sleep(self, seconds=60):\n\t\ttime.sleep(seconds)", "async def my_task(seconds):\n print('This task is taking {} seconds to complete'.format(\n seconds))\n time.sleep(seconds)\n return 'task finished'", "async def _time(self, ctx):\n try:\n await self.bot.say('@{0}:'.format(ctx.message.author.name) + '\\nDate is: **' + time.strftime(\"%A, %B %d, %Y\") + '**' + '\\nTime is: **' + time.strftime(\"%I:%M:%S %p\") + '**')\n except Exception as e:\n await self.bot.say(code.format(type(e).__name__ + ': ' + str(e)))", "def sleep(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"sleep\")", "async def time(self) -> dt.time:\n now = await self.AD.sched.get_now()\n return 
now.astimezone(self.AD.tz).time()", "def main():\n time.sleep(0.1)", "def pause(seconds):\n time.sleep(seconds);", "def second_task():\n print(\"Second Task's time is = %s\" % datetime.datetime.now())\n logger.info(\"Second Task\")", "def process_next_second(self):\n self.check_day_advance()\n rd = self.active_row\n if not rd:\n # Paused when we still have the 'after' method active.\n # Now that it is not active so we do nothing.\n return\n secs = int((datetime.now() - self.start_time).total_seconds())\n time = self.seconds_to_hms(secs)\n rd.time = time\n rd.label.config(text=time)\n rd.frame.after(1000, self.process_next_second)", "async def time(self, ctx):\n global time_msg\n if timer > 0:\n if time_msg:\n await time_msg.delete()\n time_msg = None\n minutes = timer // 60\n seconds = timer % 60 if timer % 60 > 9 else '0' + str(timer % 60)\n time_msg = await ctx.send(embed=make_time_embed('work'))\n else:\n # await ctx.send(\"No timer active.\")\n await send_msg(ctx, \"❌\", \"No Timer Active\", color='error')\n await ctx.message.delete()", "def sleep(self):\n if self._stop is not None:\n timeLeft = max(self._stop - time.time(), 0) \n sleep = min(self._sleep, timeLeft)\n else:\n sleep = self._sleep\n time.sleep(sleep)", "def handle_sleep(_):\n loop.sleep()", "def now():\r\n return time.time()", "def log_time(name):\n if DEBUG:\n now = time.time()\n logging.debug('emcc step \"%s\" took %.2f seconds', name, now - TimeLogger.last)\n TimeLogger.update()", "def test_time_lapse(self):\n t0 = time.time()\n time.sleep(2)\n lap = time_lapse(t0)\n self.assertEqual(lap, '00:00:02')", "def print_time(self):\n now = time.time()\n running_time = now - self.start_time\n print('\\nCurrent session running time: ' + str(running_time) + 's')\n\n total = self.get_running_time() + running_time\n print('Total running time: ' + str(total) + 's')\n self.set_running_time(total)", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))\n time.sleep(2)", "def wait(period=5):\n import time\n print ('Wait for {val} seconds'.format(val=period))\n time.sleep(float(period))", "def timer(ctx, config):\n log.info('Starting timer...')\n start = time.time()\n try:\n yield\n finally:\n duration = time.time() - start\n log.info('Duration was %f seconds', duration)\n ctx.summary['duration'] = duration", "def printCountdown(delay = DELAY):\n now = datetime.datetime.utcnow()\n refresh_time = now + datetime.timedelta(seconds=delay)\n time_left = refresh_time - now\n while time_left > datetime.timedelta(seconds=0):\n # rewind the line (\\r) and print countdown each second\n sys.stderr.write('\\rUpdating ClassAd in {0:>5d} s'.format(\n time_left.seconds))\n sys.stderr.flush()\n sleep(1)\n time_left = refresh_time - datetime.datetime.utcnow()\n sys.stderr.write('\\rUpdating ClassAd ' + 10*'.' 
+ '\\n\\n')\n sys.stderr.flush()", "def tic():\n import time\n global startTime_for_tictoc\n startTime_for_tictoc = time.time()", "def sync_time(self, event=None):\n if self.collect: return\n time_obj= localtime()\n serial_time = strftime(\"t%Y,%m,%d,%H,%M,%S\", time_obj)\n print(serial_time)\n self.system_timestamp = f\"\\nSystem start time is: {serial_time}\"\n print(serial_time.encode(encoding=\"ascii\"))\n self.ser.write(serial_time.encode(encoding=\"ascii\"))", "def wait(delay=2):\n time.sleep(delay)", "def timer_update(self):\n if self.mineboard.gamestate is not None:\n return\n time_so_far = round(time.time()-self.start_time)\n if time_so_far == 1:\n self.now.set(f\"Time so far: {time_so_far} second\")\n else:\n self.now.set(f\"Time so far: {time_so_far} seconds\")\n self.after(1000, self.timer_update) # calls this function every second", "def print_completion_time (start_time, elapsed = -1.0):\n\n if (elapsed == -1.0):\n elapsed = timeit.default_timer() - start_time\n\n print ('Execution completed in %f seconds' % elapsed)", "def print_pause(message):\n print(message)\n time.sleep(.5)", "def print_time_elapsed(self):\r\n stop_time = time.time()\r\n elapsed_time = stop_time - self.start_time\r\n print(f\"-- time elapsed: {elapsed_time:.5f} s\", flush=True)", "def ONTime(self, SleepTime=1):\n thread = threading.Thread(target=self.ThreadONTime, args=(SleepTime,))\n thread.start()", "def lightleep(time_ms: int = None) -> None:", "def timer(start_time=None):\r\n if not start_time:\r\n start_time = datetime.now()\r\n return start_time\r\n elif start_time:\r\n thour, temp_sec = divmod((datetime.now() - start_time).total_seconds(), 3600)\r\n tmin, tsec = divmod(temp_sec, 60)\r\n print('Time taken: %i hours %i minutes and %s seconds.' % (thour, tmin, round(tsec, 2)))", "def wait(self, sleep_time):\n time.sleep(sleep_time)", "def sleep(self):\n current_time = time.time()\n\n if not self.next_time: # first call\n self.next_time = current_time + self.period\n return\n\n delta = self.next_time - current_time\n if delta > 0:\n time.sleep(delta)\n self.next_time += self.period", "async def print_(string):\n print(string)\n await asyncio.sleep(0.0001)", "def test_sleep():\n time.sleep(3600 * 24)", "def tick(self):\n if self.display_seconds:\n new_time = time.strftime('%I:%M:%S %p')\n else:\n new_time = time.strftime('%I:%M:%S %p').lstrip('0')\n if new_time != self.time:\n self.time = new_time\n self.display_time = self.time\n self.config(text=self.display_time)\n self.after(200, self.tick)", "def randomSleep():\n\n timeToWait = random.choice((0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 5))\n print(\"sleeping\", timeToWait)\n time.sleep(timeToWait)", "def currentTime():\n return strftime(\"%H:%M:%S\", time.localtime())", "def tick(self):\r\n if self.display_seconds:\r\n new_time = time.strftime('%H:%M:%S')\r\n else:\r\n new_time = time.strftime('%I:%M %p').lstrip('0')\r\n if new_time != self.time:\r\n self.time = new_time\r\n self.display_time = self.time\r\n self.config(text=self.display_time)\r\n self.after(200, self.tick)", "def wait():\n time.sleep(1)", "def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)", "def perf_timer():\n start_time = datetime.now()\n yield\n end_time = datetime.now()\n log.info(end_time - start_time)", "def tic():\n then = datetime.datetime.now()\n return lambda: delay(datetime.datetime.now() - then)", "def sleep(seconds):\n\n # Check seconds to ensure it is a valid type.\n if type(seconds) not in [long, float, int]:\n raise 
RepyArgumentError(\"Invalid type \" + str(type(seconds)))\n\n # Using getruntime() in lieu of time.time() because we want elapsed time \n # regardless of the oddities of NTP\n start = nonportable.getruntime()\n sleeptime = seconds\n\n # Return no earlier than the finish time\n finish = start + seconds\n\n while sleeptime > 0.0:\n time.sleep(sleeptime)\n\n # If sleeptime > 0.0 then I woke up early...\n sleeptime = finish - nonportable.getruntime()", "def now():\n print(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))" ]
[ "0.6798046", "0.66427785", "0.66389567", "0.66341007", "0.66341007", "0.6586261", "0.6557557", "0.6542507", "0.6538116", "0.65356725", "0.64685106", "0.6458485", "0.64469516", "0.6442502", "0.6440117", "0.6428229", "0.64215976", "0.6406094", "0.63931483", "0.638817", "0.63853586", "0.6344666", "0.63036275", "0.6293973", "0.6273576", "0.62623245", "0.6260359", "0.6250443", "0.6235531", "0.6230933", "0.62303495", "0.62303495", "0.62303495", "0.62188554", "0.6212566", "0.6204639", "0.61950815", "0.61786646", "0.61735517", "0.61674005", "0.61379355", "0.6116474", "0.6104864", "0.6083122", "0.6076369", "0.60681224", "0.60396034", "0.603629", "0.6035317", "0.603488", "0.60274416", "0.60262984", "0.60204977", "0.6018357", "0.6006601", "0.59976023", "0.5986043", "0.59785825", "0.5971105", "0.5966404", "0.5963802", "0.5944958", "0.59366935", "0.5934671", "0.59189874", "0.59114873", "0.58947104", "0.5892261", "0.58866286", "0.5882723", "0.58819896", "0.5880294", "0.5878032", "0.5875289", "0.58738464", "0.5857731", "0.58537555", "0.5852534", "0.5850395", "0.5849762", "0.58468795", "0.58446866", "0.5844608", "0.58431613", "0.58403975", "0.58172256", "0.58150405", "0.5811735", "0.58032894", "0.57934415", "0.57866704", "0.5784909", "0.5784784", "0.5780836", "0.5775542", "0.5775393", "0.57720095", "0.5770439", "0.5766498", "0.576085" ]
0.6198006
36
Create and return a function that will extract a date, validate it, and return an ISO formatted date if it is valid, or an empty string if it is not. We need this because the "date recorded" field is directly from the Talking Book, and, as such, is very likely to contain garbage.
def make_date_extractor(md_field: str) -> Callable:
    def extract(props: Dict[str, str]) -> str:
        ds = ''
        v = props.get(md_field, '')
        try:
            d = datetime.strptime(v, '%Y/%m/%d')
            ds = d.strftime('%Y%m%d')
        except Exception:
            pass
        return ds
    return extract
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_isodate(date_str):\n iso_date = None\n\n if not date_str:\n return None\n\n #first, is it already a valid isodate?\n try:\n isodate.parse_date(date_str)\n return date_str\n except isodate.ISO8601Error, e:\n # if not, try to parse it\n try:\n iso_date = isodate.date_isoformat(parse(date_str))\n except Exception, e:\n log.msg(e.message, level=log.WARNING)\n return None\n\n return iso_date", "def date_or_none(date_str: str | None | dt.date | dt.datetime) -> dt.date | None:\n\n if not date_str:\n return None\n\n if isinstance(date_str, dt.datetime):\n return date_str.date()\n\n if isinstance(date_str, dt.date):\n return date_str\n\n if \" \" in date_str and len(date_str) > 10:\n return dt.datetime.strptime(date_str, \"%d %B %Y\").date()\n\n p_date_str = date_str.replace(\"/\", \"-\").replace(\".\", \"-\")\n date_split = p_date_str.split(\"-\")\n\n if len(date_split) > 3 or len(date_split[-1]) > 4:\n raise ValidationError(f\"Date {date_str} not in parsable format\")\n\n if len(date_split[0]) == 4:\n date_format = \"%Y-%m-%d\"\n elif len(date_split[-1]) == 4:\n date_format = \"%d-%m-%Y\"\n else:\n date_format = \"%d-%m-%y\"\n\n return dt.datetime.strptime(p_date_str, date_format).date()", "def test_validate_date_entry_returns_correct_iso_date(self):\n date_string = \"2018-01-21\"\n date_format = settings.DATE_FORMATS['iso 8601']\n date_object = datetime.datetime.strptime(\n date_string,\n date_format['datetime format'])\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (None, date_object)\n\n self.assertEqual(result, expected_result)", "def interpret_date( text ):\n try:\n as_arrow = arrow.get(text, \"MM/DD/YYYY\").replace(\n tzinfo=tz.tzlocal())\n except:\n flask.flash(\"Date '{}' didn't fit expected format 12/31/2001\")\n raise\n return as_arrow.isoformat()", "def parse_iso_date(value):\n try:\n datetime.strptime(value, \"%Y-%m-%d\")\n return value\n except Exception:\n return None", "def date_specificity(date_string):\n length = len(date_string)\n if length == 10:\n return 'ymd'\n elif length == 7:\n return 'ym'\n elif length == 4:\n return 'y'\n return None", "def convertFromISODate(date):\n if date:\n try:\n datetime_object = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ')\n except ValueError:\n return date\n else:\n return datetime_object.strftime('%Y-%m-%d')\n else:\n return None", "def date_string_to_iso(string):\n date=None\n if string is not None:\n try:\n #if separator is \"-\"\n if \"-\" in string:\n strings=string.split(\"-\")\n else:\n strings=string.split(\"/\")\n\n #~ print \"strings\"\n #~ print strings\n \n #if year is first\n if len(strings[0])==4:\n year, month, day=strings[0], strings[1], strings[2]\n #if year is last\n else:\n #the year must be coded on 4 digits\n year, month, day=strings[2], strings[1], strings[0]\n date=date_split_to_iso(year, month, day)\n except Exception, e:\n print \"pb\", string\n print \"wrong date format\", e\n\n #return None if date string is None\n return date", "def get_date_or_none(date_str, date_format='%Y-%m-%d'):\n try:\n return datetime.strptime(date_str, date_format).date()\n except (ValueError, TypeError):\n return None", "def strip_date(self, arg, line_number=0):\n try:\n dt = datetime.strptime(arg, \"%d %b %Y\")\n return dt\n except ValueError:\n raise ValueError(f\"US42 - Illegitimate date of {arg}. 
GEDCOM line: {line_number}\")\n else:\n return 'NA'", "def date(self):\n if self.raw_date_str == \"Unknown\":\n return self.raw_date_str\n else:\n try:\n _date = datetime.datetime.strptime(self.raw_date_str, \"%B %d, %Y\")\n except ValueError:\n try:\n _date = datetime.datetime.strptime(self.raw_date_str, \"%B %Y\")\n except ValueError:\n logger.error(\"Unable to parse date: {}\".format(self.raw_date_str))\n return None\n return _date.date()", "def interpret_date(text):\n try:\n as_arrow = arrow.get(text, \"MM/DD/YYYY\").replace(\n tzinfo=tz.tzlocal())\n except:\n flask.flash(\"Date '{}' didn't fit expected format 12/31/2001\")\n raise\n return as_arrow.isoformat()", "def validate_date(date_str):\n\ttry:\n\t\treturn (datetime.strptime(date_str, \"%Y-%m-%d\"), \"Date format matched\")\n\texcept Exception as e:\n\t\tlog.error(sys.exc_info()[0], e)\n\t\treturn (None, \"Date should be of YYYY-MM-DD format\")", "def get_date(date):\n return date", "def _cleanup_date(self, date):\n if (not date or\n not isinstance(date, str) or\n 'N/A' in date):\n return None\n\n date, time = date.split()\n day, month, year = date.split('.')\n hour, minute = time.split(':')\n year, month, day, hour, minute = [int(x) for x in (year, month, day, hour, minute)]\n\n # For 2-digit years, 1969/2068 is the wrap-around (POSIX standard)\n if (69 <= year < 100):\n year += 1900\n elif (0 <= year < 69):\n year += 2000\n\n return datetime.datetime(year, month, day, hour, minute)", "def valid_date(s):\n try:\n return datetime.datetime.strptime(s, \"%Y-%m-%d\")\n except ValueError:\n print(\"Error: Not a valid date: '{0}'.\".format(s))\n\n return", "def insure_date(d):\n if isinstance(d, BeautifulDate):\n return date(year=d.year, month=d.month, day=d.day)\n else:\n return d", "def create_date():\n dt = fake.date_object()\n\n try:\n human_readable = format_date(dt, format=random.choice(FORMATS), locale=random.choice(LOCALES))\n\n case_change = random.choice([0,1,2])\n if case_change == 1:\n human_readable = human_readable.upper()\n elif case_change == 2:\n human_readable = human_readable.lower()\n # if case_change == 0, do nothing\n\n machine_readable = dt.isoformat()\n except AttributeError as e:\n return None, None, None\n\n return human_readable, machine_readable, dt", "def parse_isodate(iso_date):\n date = None\n\n try:\n date = isodate.parse_date(iso_date)\n except Exception, e:\n log.msg(e.message, level=log.WARNING)\n\n return date", "def verify_date_field(field, data: str) -> str:\n if field in ['started_at', 'ended_at', 'created_at', 'performed_at', 'issued_at', 'expires_at']:\n content = string_to_date(data)\n else:\n content = data\n\n return content", "def clean_date_firstrade(datestr):\n return datetime.datetime.strptime(datestr, '%m/%d/%Y').strftime('%Y-%m-%d')", "def __get_date(measurement):\n return ast.literal_eval(measurement).get('date') if measurement else 'unknown date'", "def str_to_date(date_str: str) -> Optional[datetime.date]:\n if not date_str:\n # If the type is falsy, return None.\n return\n try:\n # Most dates in the API are in this format...\n return datetime.strptime(date_str, \"%m/%d/%Y\").date()\n except ValueError:\n # Please forgive me for this nested try-except block.\n # This API is _whack_.\n try:\n # But some are in this format...\n return datetime.strptime(date_str, \"%Y-%m-%d\").date()\n except ValueError:\n # And sometimes you get random crap like '0000-00-00'...\n return\n except TypeError:\n # If the type is truthy, but can't be cast to a date, return None.\n return", "def 
american_date_to_iso(connection):\n _update_date_by_regexp(connection=connection,\n regexp=\"^[0-9]{2}/[0-9]{2}/[0-9]{4}$\",\n new_value=\"\"\"CONCAT_WS('-',\n SUBSTR(cav.attribute_value, 7, 4),\n SUBSTR(cav.attribute_value, 1, 2),\n SUBSTR(cav.attribute_value, 4, 2))\n \"\"\")", "def compute_date(date_text):\n dt = None\n if date_text and len(date_text) == 8:\n try:\n dt = datetime.datetime.strptime(date_text, '%m%d%Y')\n except ValueError:\n pass\n return dt", "def date_to_iso(string):\r\n\r\n # disregard tokenisation, if it's there, to make this an easier conversion for GUTime\r\n string = re.sub(r'<([^~]*)~.+?>', r'\\1 ', string)\r\n\r\n # Defaults\r\n d = None\r\n m = None\r\n y = None\r\n h = None\r\n min = None\r\n s = None\r\n fs = None\r\n zone = None\r\n\r\n # ACE format\r\n match = re.search(r'(\\d\\d\\d\\d\\d\\d\\d\\d:\\d\\d\\d\\d)', re.sub('\\s', '', string))\r\n if match is not None:\r\n d = match.group(1)\r\n d = re.sub(r':', r'T', d)\r\n return d\r\n\r\n # Already in ISO format\r\n match = re.search(r'(\\d\\d\\d\\d-?\\d\\d-?\\d\\d)(-?(T\\d\\d(:?\\d\\d)?(:?\\d\\d)?([+-]\\d{1,4})?))?', re.sub('\\s', '', string))\r\n if match is not None:\r\n d = match.group(1)\r\n d = re.sub(r'-', r'', d)\r\n h = match.group(3)\r\n if h is not None:\r\n h = re.sub(r':', r'', h)\r\n return d + h\r\n else:\r\n return d\r\n\r\n # some pre-processing\r\n match = re.search('T\\d\\d(:?\\d\\d)?(:?\\d\\d)?([+-]\\d{1,4})?', re.sub('\\s', '', string))\r\n if match is not None:\r\n return re.sub(r':', r'', re.sub('\\s', '', string))\r\n\r\n # extract date\r\n if re.search(\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\s+'\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s*,?\\s+(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I) is not None:\r\n match = re.search(\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\s+'\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s*,?\\s+(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I)\r\n d = ordinal_to_num(match.group(1))\r\n m = month_to_num(match.group(5))\r\n y = match.group(7)\r\n\r\n elif re.search(\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s+'\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\b,?\\s*(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I) is not None:\r\n match = re.search(\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s+'\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\b,?\\s*(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I)\r\n d = ordinal_to_num(match.group(4))\r\n m = month_to_num(match.group(1))\r\n y = match.group(7)\r\n\r\n elif re.search(r'(\\d\\d\\d\\d)(\\/|\\-)(\\d\\d?)\\2(\\d\\d?)', re.sub('\\s', '', string)) is not None:\r\n match = re.search(r'(\\d\\d\\d\\d)(\\/|\\-)(\\d\\d?)\\2(\\d\\d?)', re.sub('\\s', '', string))\r\n m = match.group(3)\r\n d = match.group(4)\r\n y = match.group(1)\r\n\r\n elif re.search(r'(\\d\\d?)(\\/|\\-|\\.)(\\d\\d?)\\2(\\d\\d(\\d\\d)?)', re.sub('\\s', '', string)) is not None:\r\n match = re.search(r'(\\d\\d?)(\\/|\\-|\\.)(\\d\\d?)\\2(\\d\\d(\\d\\d)?)', re.sub('\\s', '', string))\r\n m = match.group(1)\r\n d = match.group(3)\r\n y = match.group(4)\r\n\r\n if y is not None:\r\n # check for European style date\r\n if 12 < int(m) <= 31 and int(d) <= 12:\r\n new_d = m\r\n m = d\r\n d = new_d\r\n\r\n # check for 2 digit year\r\n y = 
normalise_two_digit_year(str(y))\r\n\r\n iso = \"%4d%02d%02d\" % (int(y), int(m), int(d))\r\n\r\n else:\r\n iso = \"XXXXXXXX\"\r\n\r\n # Extract time\r\n match = re.search(r'(\\d?\\d):(\\d\\d)(:(\\d\\d)(\\.\\d+)?)?(([AP])\\.?M\\.?)?(([+\\-]\\d+|[A-Z][SD]T|GMT([+\\-]\\d+)?))?',\r\n re.sub('\\s', '', string), re.I)\r\n if match is not None:\r\n h = match.group(1)\r\n min = match.group(2)\r\n s = match.group(4)\r\n fs = match.group(5)\r\n ampm = match.group(7)\r\n zone = match.group(9)\r\n\r\n if ampm is not None and ampm[0].lower() == 'p':\r\n h = str(int(h) + 12)\r\n\r\n if zone is not None:\r\n zm = re.search(r'(GMT)([+\\-]\\d+)', zone)\r\n if zm is not None:\r\n zone = zm.group(2)\r\n elif zone.lower().find('gmt') > -1:\r\n zone = 'Z'\r\n elif re.search(r'([A-Z])([SD])T', zone) is not None:\r\n zm = re.search(r'([A-Z])([SD])T', zone)\r\n # Timezone offsets from GMT\r\n timezones = {\r\n \"R\": 1,\r\n \"E\": -5,\r\n \"C\": -6,\r\n \"M\": -7,\r\n \"P\": -8\r\n }\r\n if zm.group(1).upper() in timezones:\r\n zone = timezones[zm.group(1).upper()]\r\n if zm.group(2).lower() == 'd':\r\n zone += 1\r\n if zone < 0:\r\n zone = '-%02d00' % (-1 * zone)\r\n else:\r\n zone = '+%02d00' % zone\r\n elif re.search(r'(\\d\\d)(\\d\\d)\\s+(h(ou)?rs?|(on\\s+)?\\d\\d?\\/\\d)', string, re.I) is not None:\r\n match = re.search(r'(\\d\\d)(\\d\\d)\\s+(h(ou)?rs?|(on\\s+)?\\d\\d?\\/\\d)', string, re.I)\r\n h = match.group(1)\r\n min = match.group(2)\r\n\r\n if h is not None:\r\n if fs is not None:\r\n fs = re.sub(r'\\.', r'', fs)\r\n iso += 'T%02d%02d%02d.%02d' % (int(h), int(min), int(s), int(fs))\r\n elif s is not None:\r\n iso += 'T%02d%02d%02d' % (int(h), int(min), int(s))\r\n elif min is not None:\r\n iso += 'T%02d%02d' % (int(h), int(min))\r\n\r\n if zone is not None:\r\n iso += zone.lstrip()\r\n\r\n return iso", "def validate_input(date_string):\n #I decided to make sure the input was valid by checking each individual piece. I did this by splitting the input string by the dashes.\n #I checked first that the month value was between 1 and 12. I then checked depending on the month if the day value was valid.\n #I also made sure to check that the year was greater than 1000.\n #For February, I made a specific check for if it was a leap year or not. If the year inputted is not a leap year and the user entered\n #29 as the day value, it throws an error. 
Finally, once all values are checked and are valid, they are put into a tuple.\n splitdate = date_string.split(\"-\")\n if splitdate[0] != '' and splitdate[1] != '' and splitdate[2] != '':\n if int(splitdate[0]) >= 1 and int(splitdate[0]) <= 12:\n if int(splitdate[0]) == 1 or int(splitdate[0]) == 3 or int(splitdate[0]) == 5 or int(splitdate[0]) == 7 or int(splitdate[0]) == 8 or int(splitdate[0]) == 10 or int(splitdate[0]) == 12:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 31:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[0]) == 4 or int(splitdate[0]) == 6 or int(splitdate[0]) == 9 or int(splitdate[0]) == 11:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 30:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[0]) == 2:\n if int(splitdate[2]) % 4 == 0 or int(splitdate[2]) % 1000 == 0:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 29:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[1]) >= 1 and int(splitdate[1]) <= 28:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n return None", "def _validate_date(mapping: Mapping[str, Any],\n ref: str) -> Optional[SchemaError]:\n if 'format' in mapping:\n token_lines = None # type: Optional[List[List[lexery.Token]]]\n try:\n token_lines = mapry.strftime.tokenize(format=mapping['format'])\n except (lexery.Error, NotImplementedError) as err:\n return SchemaError(str(err), ref='{}/format'.format(ref))\n\n valerr = mapry.strftime.validate_date_tokens(token_lines=token_lines)\n if valerr is not None:\n return SchemaError(str(valerr), ref='{}/format'.format(ref))\n\n return None", "def _date_from_str(self, date_entry, date_str):\n dt_obj = None\n if date_str:\n dt_obj = parser.parse(date_str)\n if dt_obj < MIN_DATE or dt_obj > MAX_DATE:\n prompt = 'Please keep dates within Jan 1, 2015 up to today.'\n raise ValueError(prompt)\n \n return dt_obj", "def _handle_bad_input_date(f):\n def date_handler_wrapper(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n import re\n e_str = \"{}\".format(e)\n for r in [\".*date/time field value out of range: \\\"(.*)\\\".*LINE\",\n \".*invalid input syntax for type timestamp: \\\"(.*)\\\".*\",\n \".*timestamp out of range: \\\"(.*)\\\".*\"]:\n p = re.compile(r, re.DOTALL)\n m = p.match(e_str)\n if m and len(m.groups()) > 0:\n bad_date = m.group(1)\n raise wsme.exc.ClientSideError(_(\n \"Invalid date '{}' specified\".format(bad_date)))\n raise\n return date_handler_wrapper", "def parse_date(line):\n line = line.replace(\" \", \"\")\n ret = re.search(r'\\d{1,2}-\\d{1,2}', line)\n if ret is not None and len(line) <= 5:\n return ret.group(0)\n else:\n return \"\"", "def cleanVitalsDate(date_str):\n if date_str[-1] != 'Z':\n date_str += 'Z'\n return date_str.replace(' ', 'T')", "def parse_iso_date(date_string):\n\n try:\n return date.fromisoformat(date_string)\n except ValueError:\n raise DomainError(\"Incorrect date format!\", \"D0001\")", "def determine_project_date(self):\n\n if self.params[\"hosted on comic\"]:\n\n if self.params[\"workshop date\"]:\n date = self.to_datetime(self.params[\"workshop date\"])\n else:\n date = \"\"\n else:\n datestr = self.params[\"workshop date\"]\n # this happens when excel says its a number. 
I dont want to force the\n # excel file to be clean, so deal with it here.\n if type(datestr) == float:\n datestr = str(datestr)[0:8]\n\n try:\n date = timezone.make_aware(datetime.datetime.strptime(datestr,\"%Y%m%d\"),\n timezone.get_default_timezone())\n except ValueError as e:\n logger.warn(\"could not parse date '%s' from xls line starting with '%s'. Returning default date 2013-01-01\" %(datestr,self.params[\"abreviation\"]))\n date = \"\"\n\n\n if date == \"\":\n # If you cannot find the exact date for a project,\n # use date created\n if self.params[\"hosted on comic\"]:\n return self.params[\"created at\"]\n # If you cannot find the exact date, try to get at least the year right.\n # again do not throw errors, excel can be dirty\n\n year = int(self.params[\"year\"])\n\n try:\n date = timezone.make_aware(datetime.datetime(year,1,1),\n timezone.get_default_timezone())\n except ValueError:\n logger.warn(\"could not parse year '%f' from xls line starting with '%s'. Returning default date 2013-01-01\" %(year,self.params[\"abreviation\"]))\n date = timezone.make_aware(datetime.datetime(2013,1,1),\n timezone.get_default_timezone())\n\n return date", "def extract_date(filename, corpus):\n try:\n if corpus in (\"ester1\", \"ester2\"):\n date_match = re.match(r'^(\\d\\d\\d\\d)', filename)\n else:\n if \"EST2BC-FRE-FR-FINTER-DEBATE\" in filename:\n date_match = re.match(r'.*\\_(\\d\\d\\d\\d)\\d\\d\\d\\d\\_', filename)\n elif \"EST2BC_FRE_FR\" in filename:\n date_match = re.match(r'.*\\_(\\d\\d\\d\\d)\\d\\d\\d\\d\\_', filename)\n else:\n date_match = re.match(r'.*\\_(\\d\\d\\d\\d)\\-', filename)\n date = str(date_match.group(1))\n return date\n\n except Exception as e:\n print(\"Exception du try extract_date\")\n print(e)\n date = \"NA\"\n return date", "def _format_date(input_date, day_flag, sep_char=\"-\"):\n date_iso = input_date[6:10] + sep_char + input_date[0:2]\n if day_flag:\n date_iso = date_iso + sep_char + input_date[3:5]\n return date_iso", "def get_date(text=\"\"):\n clear()\n date = input(\"Enter {}date (Format:YYYY-MM-DD): \".format(text))\n try:\n datetime.datetime.strptime(date, \"%Y-%m-%d\")\n except ValueError:\n input(\"Please enter date in this format: YYYY-MM-DD.\"\n \" Press enter to continue.\")\n return get_date()\n else:\n return date", "def get_prep_value(self, value):\n\n try:\n return value.isoformat()\n except:\n pass\n\n # maybe value is a string containing a PartialDate?\n try:\n pd = string_to_partialdate(value)\n return pd.isoformat()\n except:\n return ''", "def __calculate_date_recorded(self, upload_date_str):\n\n upload_date = datetime.date(\n int(upload_date_str[0:4]),\n int(upload_date_str[4:6]), int(upload_date_str[6:8]))\n if self.event.know_date:\n if not (self.event.date_begin <= upload_date <=\n self.event.date_end):\n return self.event.date_default.isoformat()\n\n return upload_date.isoformat()", "def _get_normal_date(self, args):\n\n func1, func2, func3 = args\n self.assertIsNotNone(func1(20130201, \"20190120\"))\n self.assertIsNotNone(func2(\"2013/02/01\", \"2019-01-20\"))\n self.assertIsNotNone(func3(r\"2013-/\\-02~@-\\/-@~01\",\n pd.to_datetime('2019-01-20')))", "def extract_date_from_iso_time(t: str) -> typing.Optional[datetime.date]:\n if not t:\n return None\n\n date, _ = t.split('T')\n return datetime.date.fromisoformat(date)", "def get_python_date(self):\n return dateutil.parser.parse(self.iso_date)", "def validate_date(date):\n\n if isinstance(date, datetime):\n try:\n date = dto_to_str(date)\n except ValueError:\n pass # What to do in this 
case?\n else:\n return date\n\n if isinstance(date, str) or isinstance(date, unicode):\n try: # Convert to dto then back to string to ensure format is as expected\n date = str_to_dto(date)\n date = dto_to_str(date)\n except ValueError:\n pass\n else:\n return date\n\n raise DataValidationError(\"Date, {}, is not of an expected type (datetime object or string in format YYYYMMDD or MM/DD/YYYY\".format(date))", "def validate_date_field(self, field: dict, value: str):\n if field.get(\"required\") and value.strip() == \"\":\n return f\"{field.get('label')} is required!\"\n\n try:\n datetime.datetime.strptime(value, self.config.get(\"date_format\"))\n except ValueError:\n return f\"{field.get('label')} should be a date with the format provided in \" \\\n f\"config {self.config.get('date_format')}\"\n\n return \"\"", "def unify_date_format(date):\n if type(date) == str:\n try:\n date = dateutil.parser.parse(date) \n except:\n pass\n return date", "def to_date_or_none(value: Optional[Union[datetime.date, str]]) -> Optional[datetime.date]:\n if isinstance(value, datetime.date):\n return value\n if value is None or value == '000000':\n return None\n return datetime.datetime.strptime(value, '%d%m%y').date()", "def parse_date_str(self, date_str, date_format=DATE_FORMAT):\n try:\n return self.adjust_year(datetime.strptime(date_str, date_format).date())\n except ValueError:\n return None", "def format_date(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_date(data)\r\n\r\n return data.isoformat()", "def create_date():\n dt = fake.date_object()\n\n # wrapping this in a try catch because\n # the locale 'vo' and format 'full' will fail\n try:\n human = format_date(dt,\n format=random.choice(FORMATS),\n locale=random.choice(LOCALES))\n\n case_change = random.randint(0,3) # 1/2 chance of case change\n if case_change == 1:\n human = human.upper()\n elif case_change == 2:\n human = human.lower()\n\n machine = dt.isoformat()\n except AttributeError as e:\n return None, None, None\n\n return human, machine #, dt", "def create_date():\n dt = fake.date_object()\n\n # wrapping this in a try catch because\n # the locale 'vo' and format 'full' will fail\n try:\n human = format_date(dt,\n format=random.choice(FORMATS),\n locale=random.choice(LOCALES))\n\n case_change = random.randint(0,3) # 1/2 chance of case change\n if case_change == 1:\n human = human.upper()\n elif case_change == 2:\n human = human.lower()\n\n machine = dt.isoformat()\n except AttributeError as e:\n return None, None, None\n\n return human, machine #, dt", "def _get_date_from_str(date_input):\r\n return datetime.datetime.strptime(date_input.strip(), \"%Y-%m-%d\").replace(tzinfo=pytz.UTC)", "def process_pub_date(pmid, year, mon, day, medline_date):\n\n if not year:\n year = 1900\n\n if medline_date:\n\n match = re.search(r\"\\d{4,4}\", medline_date)\n if match:\n year = match.group(0)\n\n if int(year) < 1900:\n year = 1900\n\n if year and re.match(\"[a-zA-Z]+\", mon):\n try:\n pub_date = datetime.datetime.strptime(f\"{year}-{mon}-{day}\", \"%Y-%b-%d\").strftime(\n \"%Y-%m-%d\"\n )\n except Exception as e:\n pub_date = \"1900-01-01\"\n log.error(f\"Problem converting {year} {mon} {day} to pubdate for PMID:{pmid}\")\n\n elif year:\n pub_date = f\"{year}-{mon}-{day}\"\n\n else:\n pub_date = None\n if year and re.match(\"[a-zA-Z]+\", mon):\n try:\n pub_date = datetime.datetime.strptime(f\"{year}-{mon}-{day}\", \"%Y-%b-%d\").strftime(\n \"%Y-%m-%d\"\n )\n except Exception as e:\n pub_date = \"1900-01-01\"\n 
log.error(f\"Problem converting {year} {mon} {day} to pubdate for PMID:{pmid}\")\n\n elif year:\n pub_date = f\"{year}-{mon}-{day}\"\n\n return pub_date", "def is_valid_date(date):\n\n try:\n parse(date)\n return date\n except:\n new_date = raw_input(\"Invalid date, try again: YYYY-MM-DD \")\n return is_valid_date(new_date)", "def knowledge_date_valid(record):\n today = datetime.now(timezone.utc).date().strftime(\"%Y-%m-%d\")\n gen_date = record['knowledge_date'].strftime(\"%Y-%m-%d\")\n assert gen_date == today", "def convert_dates_to_ISO(date: str, date_2: str):\n iso_date_1 = \"0000-00-00\" # default values\n iso_date_2 = \"0000-00-00\"\n month_dict = { # can't wait for python 3.10 when we get switch statements :D\n 'january': 1,\n 'february': 2,\n 'march': 3,\n 'april': 4,\n 'may': 5,\n 'june': 6,\n 'july': 7,\n 'august': 8,\n 'september': 9,\n 'october': 10,\n 'november': 11,\n 'december': 12\n }\n try:\n current_year = d.today().year\n month_1 = date.split(\" \")[0]\n month_1_int = month_dict[month_1]\n day_1 = date.split(\" \")[1]\n month_2 = date_2.split(\" \")[0]\n month_2_int = month_dict[month_2]\n day_2 = date_2.split(\" \")[1]\n except KeyError:\n return {\"Start date not set\", \"End date not set\"} # I'm not overly creative\n if month_1_int > month_2_int or \\\n ((month_1_int == month_2_int) and day_1 > day_2): # if the second date happens first\n iso_date_1 = str(current_year) + \"-\" + get_ISO_num_string(month_1_int) + \"-\" + get_ISO_num_string(int(day_1))\n iso_date_2 = str(int(current_year) + 1) + \"-\" + get_ISO_num_string(month_2_int) + get_ISO_num_string(int(day_2))\n else:\n iso_date_1 = str(current_year) + \"-\" + get_ISO_num_string(month_1_int) + \"-\" + get_ISO_num_string(int(day_1))\n iso_date_2 = str(current_year) + \"-\" + get_ISO_num_string(month_2_int) + \"-\" + get_ISO_num_string(int(day_2))\n return {iso_date_1, iso_date_2}", "def format_date(d):\n if type(d) == str:\n d = dateutil_parse(d)\n return d.isoformat()", "def extract_date(str_date):\n rgx = re.compile(r'((\\d{4})-(\\d{2})-(\\d{2}))')\n o_match = rgx.search(str_date)\n if o_match is not None:\n\n lng_day = int(o_match.group(4))\n lng_month = int(o_match.group(3))\n lng_year = int(o_match.group(2))\n\n # These digits may not give a legitimate combination of Y M D\n try:\n dte = datetime(lng_year, lng_month, lng_day)\n except ValueError:\n # Use today's values as defaults, and use any part that does work\n dte = datetime.now()\n # Start with day=1 in case the month is feb and the day 30 etc\n dte = datetime.replace(dte, day=1, hour=0, minute=0, \\\n second=0, microsecond=0)\n try:\n dte = datetime.replace(dte, year=lng_year)\n except ValueError:\n pass\n try:\n dte = datetime.replace(dte, month=lng_month)\n except ValueError:\n pass\n try:\n dte = datetime.replace(dte, day=lng_day)\n except ValueError:\n pass\n\n i_start = o_match.start()\n tpl_date_rest = (dte, str_date[0:i_start] + ' ' + \\\n str_date[i_start + 10:])\n\n else:\n tpl_date_rest = (None, str_date)\n\n return tpl_date_rest", "def valid_date(date_string):\n date_string_number = re.sub('\\D', '', date_string)\n try:\n date_res = datetime.strptime(date_string_number, '%Y%m%d').date()\n except ValueError:\n print(\"Not a valid date: '{}'.\".format(date_string))\n else:\n return date_res", "def _sanitize_date(self, datestr):\n nums = [int(x) for x in datestr.split('/')]\n padded = [\"{:0>2}\".format(x) for x in nums]\n return \"/\".join(padded)", "def getdatefromuser():\n date_str = raw_input(\"Enter the date cutoff in mm/dd/yyyy format: 
\")\n date_parts = re.split('[-/]', date_str)\n return date(*[int(elt) for elt in [date_parts[2], date_parts[0], date_parts[1]]])", "def extract_date(string):\r\n filename = os.path.basename(string)\r\n noext = os.path.splitext(filename)[0]\r\n\r\n expressions = [\r\n # XPS_201307121140a YYYYmmDDHHMM\r\n [r'XPS_([0-9]{12}).*', '%Y%m%d%H%M'],\r\n # XPS_031105a.xml DDmmyy\r\n #[r'XPS_([0-9]{6}).*', '%d%m%y'],\r\n # XPS_07aug22b_countratetest YYmmmDD\r\n #,[r'XPS_([0-9]{6})[a-zA-Z].*', '%d%m%y'],\r\n # XPS_07aug22b_countratetest YYmmmDD\r\n #,[r'XPS_([0-9]{2}[a-zA-Z]{3}[0-9]{2}).*', '%y%b%d'],\r\n # XPS_20080111a YYYYMMDD\r\n #,[r'XPS_([0-9]{8}).*', '%Y%m%d']\r\n ]\r\n date_out = None\r\n for exp in expressions:\r\n try:\r\n res = re.search(exp[0], noext)\r\n pure = res.group(1)\r\n try:\r\n date_out = time.mktime(time.strptime(pure, exp[1]))\r\n except ValueError:\r\n print ' Date could not be parsed:', pure\r\n except (AttributeError, IndexError):\r\n pass\r\n if date_out is None:\r\n print ' Filename pattern unknown:', noext\r\n return date_out", "def input_date(self, date_attr):\r\n try:\r\n date = input(\"Entrez la \" + date_attr + \"(JJ/MM/AAAA): \")\r\n datetime.datetime.strptime(date, '%d/%m/%Y')\r\n return date\r\n except ValueError:\r\n print(\"Erreur de saisie de la date (format JJ/MM/AAAA)\")\r\n return self.input_date(date_attr)", "def _str_to_date(self, date):\n return datetools.date_parser(date)", "def dateFieldValidator(field):\n if not (field[\"type\"] == \"datetime\" or field[\"type\"] == \"date\"):\n raise ValueError(\"DateFieldValidator error: field type \" + field[\"type\"])\n if \"format\" in field:\n format_string = field[\"format\"]\n # The following is borrowed from datapackage.py...\n\n # Order of the replacements is important since month and minutes\n # can be denoted in a similar fashion\n replacement_order = [('hh', '%m'), (':mm', ':%M'), ('ss', '%S'),\n ('yyyy', '%Y'), ('yy', '%y'), ('mm', '%m'),\n ('dd', '%d')]\n\n # For each replacement we substitute (and ignore the case)\n for (old, new) in replacement_order:\n format_string = re.sub(\"(?i)%s\" % old, new, format_string)\n if field[\"type\"] == \"datetime\":\n return lambda x: datetime.datetime.strptime(x, format_string)\n else:\n return lambda x: datetime.datetime.strptime(x, format_string).date()\n else:\n if field[\"type\"] == \"datetime\":\n return lambda x: datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S%Z')\n else:\n return lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date()", "def test_date_to_string_returns_valid_string(self):\n date_string = \"2018-01-21\"\n date_format = \"%Y-%m-%d\"\n date_object = datetime.datetime.strptime(\n date_string,\n date_format\n )\n result = self.menu.date_to_string(date_object)\n\n self.assertEqual(result, date_string)", "def search_date(self, text='date'):\n\n date = input(f\"\\nEnter a {text} (MM-DD-YYYY): \")\n date_obj = datetime.strptime(date, \"%m-%d-%Y\")\n\n try:\n date = datetime.strftime(date_obj, \"%m-%d-%Y\")\n return date\n except ValueError:\n input(\"\\nFormat of date must be MM-DD-YYYY\\n\")\n return self.search_date()", "def _parse_ISO8601_date(date: str) -> datetime.datetime:\n try:\n dt = parser.isoparse(date)\n if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:\n return dt\n return dt.astimezone()\n except ValueError:\n raise ValidationError(\n \"Expecting date in ISO8601 format, eg. 
2018-08-01T00:00:00Z, \"\n f\"gets {date} instead.\"\n )", "def seekdate(string):\n match = None\n if rxg_date_dmy.match(string):\n match = rxg_date_dmy.match(string)\n formatstring = __parsedate(match,'dmy')\n return formatstring \n elif rgx_date_mdy.match(string):\n match = rgx_date_mdy.match(string)\n return __parsedate(match,'myd')\n elif rgx_date_ymd.match(string):\n match = rgx_date_ymd.match(string)\n return __parsedate(match,'ymd')\n elif rgx_date_ydm.match(string):\n match = rgx_date_ydm.match(string)\n return __parsedate(match,'ydm')\n else:\n return None", "def check_dateformat(date_field, date_format='YYYY-MM-DD'):\r\n if not date_format or not date_field:\r\n return None\r\n # format = \"%Y-%m-d\"\r\n date_field = date_field.strip()\r\n\r\n try:\r\n dd = None\r\n mm = None\r\n yyyy = None\r\n seperator = '-'\r\n date_part = date_field\r\n time_part = None\r\n if '/' in date_field:\r\n seperator = '/'\r\n if ' ' in date_field:\r\n (date_part, time_part) = date_field.split(' ')\r\n\r\n if not time_part:\r\n if date_format == 'DD-MM-YYYY' or date_format == 'DD/MM/YYYY':\r\n (dd, mm, yyyy) = date_part.split(seperator)\r\n elif date_format == 'YYYY-MM-DD' or date_format == 'YYYY/MM/DD':\r\n (yyyy, mm, dd) = date_part.split(seperator)\r\n elif date_format == 'YYYY-DD-MM' or date_format == 'YYYY/DD/MM':\r\n (yyyy, dd, mm) = date_part.split(seperator)\r\n yyyy = int(yyyy)\r\n dd = int(dd)\r\n mm = int(mm)\r\n date_part = date(yyyy, mm, dd)\r\n return date_part\r\n else:\r\n raise SIDException(\r\n 'Invalid Date: datetime not supported', 'datetime')\r\n # to support further \"%d/%m/%Y %H:%M:%S\"\r\n\r\n # date_string = str(yyyy) + '-' + str(mm) + '-' + str(dd)\r\n # return datetime.strptime(date_string, format)\r\n\r\n except Exception:\r\n raise SIDException('Invalid Date', 'check_dateformat')", "def refine_date(c):\n return strip_some_punct(c)", "def load_date(self):\n dt = self.fake.date_object()\n\n human_readable = format_date(dt, format=random.choice(FORMATS), locale='en_US')\n human_readable = human_readable.lower()\n human_readable = human_readable.replace(',', '')\n machine_readable = dt.isoformat()\n\n return human_readable, machine_readable", "def validate_date(column_name, value, date_format, column_data_type=\"date\"):\n value = value.replace(\"T\", \" \")\n dtpart = value.split(\" \")\n value = dtpart[0]\n try:\n datetime.strptime(value, date_format)\n return None\n except ValueError:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)", "def publication_date(self):\n try:\n return PartialDate.loads(\n get_value(self.record, \"imprints.date[0]\")\n or LiteratureReader(self.record).publication_date\n )\n except ValueError:\n return None", "def get_date(bfo):\n from invenio.bibformat_elements.bfe_INSPIRE_arxiv import get_arxiv\n\n #true date\n date = bfo.fields('269__c')\n if date:\n datestruct=parse_date(date[0])\n if datestruct[0]:\n return(datestruct)\n\n\n #arxiv date\n arxiv = get_arxiv(bfo,category=\"no\")\n if arxiv:\n date=re.search('(\\d+)',arxiv[0]).groups()[0]\n if len(date) >=4:\n year = date[0:2]\n if year > '90':\n year='19'+year\n else:\n year='20'+year\n date = year+date[2:4]+'00'\n date=parse_date(date)\n if date[1]: return(date)\n\n\n\n #journal year\n if bfo.fields('773__y'):\n date= parse_date(bfo.fields('773__y')[0]+'0000')\n if date[0]:\n return date\n\n #date added\n if bfo.fields('961__x'):\n date= parse_date(bfo.fields('961__x')[0])\n if date[0]:\n return date\n\n\n return None", "def convert_date(datestring):\n 
datestring = datestring.rstrip('†')\n if datestring not in ('NA', 'None specified', 'TBA', 'None', 'N/A', ''):\n try:\n return dateutil_parser.parse(datestring).date()\n except ValueError: # dateutil's error messages aren't friendly\n raise ValueError(\"Not a date: {0}\".format(datestring))", "def format_date(self, date_val):\n try:\n if type(date_val) is not datetime:\n d = date.fromisoformat(date_val[0:10])\n else:\n d = date_val\n return d.strftime('%Y-%m-%d')\n except Exception as e:\n self.error((str(e)))", "def validate_date(value):\n if date_regex.fullmatch(value):\n return True\n else:\n return False", "def check_date(message, param):\n while True:\n try:\n day, month, year = input(message).split(param)\n return str(datetime.datetime(int(year), int(month), int(day)).strftime(\"%d/%m/%Y\"))\n except ValueError:\n continue", "def get_expiry_date(name: str, date_str: str):\n if 'IO' == name:\n # 沪深300, 到期月份的第三个星期五,遇国家法定假日顺延\n dates = THIRD_FRIDAYS[THIRD_FRIDAYS > date_str]\n day_str = get_next_trading_day_str(dates[0])\n elif name in ['cu', 'al', 'zn', 'au', 'ru']:\n # 上期所,标的期货合约到期日前一个月的倒数第 5 个交易日\n dates = LAST_BUSINESS_DAY[LAST_BUSINESS_DAY < date_str]\n day_str = get_next_trading_day_str(dates[-1], -5)\n elif name in ['m', 'c', 'i', 'pg', 'l', 'v', 'pp']:\n # 大商所,标的期货合约到期日前一个月的第 5 个交易日\n dates = FIRST_BUSINESS_DAY[FIRST_BUSINESS_DAY < date_str]\n day_str = get_next_trading_day_str(dates[-1], 5)\n elif 'SR' == name and date_str < '2019-09-01':\n # 郑商所,2019-09-01 之前为标的期货合约到期日前两个月的倒数第 5 个交易日\n dates = LAST_BUSINESS_DAY[LAST_BUSINESS_DAY < date_str]\n day_str = get_next_trading_day_str(dates[-2], -5)\n elif name in ['CF', 'SR', 'RM', 'MA', 'TA', 'ZC']:\n # 郑商所,标的期货合约到期日前一个月的第 3 个交易日\n dates = FIRST_BUSINESS_DAY[FIRST_BUSINESS_DAY < date_str]\n day_str = get_next_trading_day_str(dates[-1], 3)\n else:\n raise ValueError(f\"options contract not supported: {name}\")\n return day_str", "def test_validate_date_entry_returns_correct_ValueError(self):\n date_string = \"2018-21-01\"\n date_format = settings.DATE_FORMATS['iso 8601']\n\n error_text = \"{} is not valid in format {}\".format(\n date_string,\n date_format['UI format']\n )\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (error_text, None)\n\n self.assertEqual(result, expected_result)", "def date_from_string(my_string):\n if my_string:\n return datetime.strptime(my_string, DATE_FORMAT).date()\n return None", "def to_internal_value(self, value: str) -> date:\n return utils.date_from_iso_str(value)", "def _parse_date(date_string, date_type):\n # If date_string is None return None\n if date_string is None:\n return None\n\n # Parse rfc3339 dates from string\n elif date_type == \"rfc3339\":\n if date_string[-3] == \":\":\n date_string = date_string[:-3] + date_string[-2:]\n return datetime.datetime.strptime(date_string, \"%Y-%m-%dT%H:%M:%S%z\")\n\n # Parse date only strings\n elif date_type == \"date-only\":\n if re.match(r\"^(\\d){4}-00-00$\", date_string):\n return datetime.datetime.strptime(date_string, \"%Y-00-00\").date()\n else:\n return datetime.datetime.strptime(date_string, \"%Y-%m-%d\").date()\n \n elif date_type == \"date-time\":\n return datetime.datetime.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")", "def parse_date_str(self, date_str, date_format=None):\n if not date_format:\n date_format = self.parsing_date_format\n result = datetime.datetime.strptime(date_str, date_format).date()\n if result.year < 1900:\n # we have a typo in the house. 
Just use 2000 + last-two-digits\n year = (result.year % 100) + 2000\n result = result.replace(year=year)\n return result", "def convert_date(date):\n if isinstance(date, datetime.date):\n return date\n elif isinstance(date, str):\n match = DATE_PATTERN.match(date)\n if match:\n groups = match.groups()\n if len(groups) == 3:\n return datetime.date(\n year=int(\n groups[0]), month=int(\n groups[1]), day=int(\n groups[2]))\n return None", "def format_date(data, format_string='%Y-%m-%d'):\n if (data == '') or 'BC' in data:\n return None\n return datetime.strptime(data, format_string)", "def _sanitize_year(self, datestr):\n try:\n year = str(datetime.datetime.strptime(datestr, '%Y').date().year)\n except:\n try:\n year = str(datetime.datetime.strptime(datestr,\n '%Y-%m-%d').date().year)\n except:\n year = None\n return year", "def process_datetime(a_date: datetime) -> str:\n\n return str(a_date.date()) if a_date else Presenter.DEFAULT", "def hire_date(self):\n if \"hireDate\" in self._prop_dict:\n return datetime.strptime(self._prop_dict[\"hireDate\"].replace(\"Z\", \"\"), \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n return None", "def dateHandler(aDateString):\n day, month, year = ddmmyyyy_pattern.search(aDateString).groups()\n return (int(year), int(month), int(day), 0, 0, 0, 0, 0, 0)", "def clean_date(self):\n input_day = self.cleaned_data.get('day')\n input_date = self.cleaned_data.get('date')\n if input_date < datetime.date.today():\n raise forms.ValidationError(\"Can not create a lesson in the past.\")\n elif input_date.strftime(\"%A\").lower() != input_day:\n raise forms.ValidationError(input_date.strftime(\"%d-%m-%Y\")+\" does not fall on a \"+input_day.title()+\".\")\n return input_date", "def format_date(date_input):\n if date_input is None:\n return None\n if isinstance(date_input, datetime.datetime):\n if date_input.hour < 3 and not (date_input.hour == 0 and date_input.minute == 0 and date_input.second == 0):\n date_input = date_input - datetime.timedelta(days=1)\n return date_input.strftime(\"%Y-%m-%d\")\n elif isinstance(date_input, datetime.date):\n return date_input.strftime(\"%Y-%m-%d\")\n else:\n for format_string in ('%Y-%m-%d', '%m/%d/%y', '%Y-%m'):\n try:\n date_input = datetime.datetime.strptime(date_input, format_string)\n return date_input.strftime(\"%Y-%m-%d\")\n except ValueError:\n pass\n return None\n # raise ValueError('no valid date format found')", "def reformat_date(date_str: str) -> str:\n if date_str is None or date_str == '':\n return None\n [month_key, year] = date_str.split()\n return f'{year}-{MONTHS[month_key]}'", "def parse(self, str):\n values = self._exp.findall(str)\n if values is None or len(values) == 0:\n return None\n\n values = values[0]\n assert(len(values) == 3)\n\n day = int(values[self._dmy_idx[0]])\n month = int(values[self._dmy_idx[1]])\n year = int(values[self._dmy_idx[2]])\n\n return date(year, month, day)", "def get_date(self, string):\n # remove new lines\n string = string.replace('\\n', '')\n # first, get first digit - day is then number value of following 2 chars \n firstDigit = re.search('\\d', string)\n day = string[firstDigit.start():firstDigit.start()+2]\n day = self.find_number(day)\n # then get year - match 4 digits\n yearLoc = re.search(r'\\d{4}(?!\\d)', string)\n year = string[yearLoc.start():yearLoc.end()]\n # then get month\n monthLoc = re.search(r'[A-Z]{1}[a-z]{2}', string)\n month = string[monthLoc.start():monthLoc.end()]\n try:\n month = strptime(month, '%b').tm_mon\n date = dt.datetime(int(str(year)), int(str(month)), 
int(str(day)))\n except ValueError:\n pass\n date = np.NAN\n return date", "def validateDate(userId, key, date):\n if key == 'dateOfBirth' and date == None:\n return None\n\n if not isinstance(date, str):\n return {'error': 'invalid value: %s (%s), valid value date in ISO 8601 format' % (date, pythonTypeToJSONType(date))}\n\n if 'T' in date:\n try:\n datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ')\n except TypeError:\n return {'error': 'invalid value: %s (%s), valid value date in ISO 8601 format' % (date, pythonTypeToJSONType(date))}\n except ValueError:\n return {'error': 'invalid value: %s, valid value date in ISO 8601 format' % date}\n else:\n try:\n datetime.datetime.strptime(date, '%Y-%m-%d')\n except TypeError:\n return {'error': 'invalid value: %s (%s), valid value date in ISO 8601 format' % (date, pythonTypeToJSONType(date))}\n except ValueError:\n return {'error': 'invalid value: %s, valid value date in ISO 8601 format' % date}\n\n if userId:\n q = Measurement.query.add_columns('id').filter_by(owner_id = userId).filter_by(measurementDate = date).first()\n if q:\n return {'error': 'duplicate value found for date %s (data id %s)' % (date, q[1])}\n else:\n return None", "def get_expiry():\n\n return get_or_append_details('expiry', \"Please enter your expiry date, two digits for the month and two digits for the year\")", "def convert_date(date):\n\n if len(date) > 10: date = date[:date.rfind(\"-\")]\n return convf(date)", "def normalize_date(__date, type='arrival'):\n\n if isinstance(__date, datetime.datetime):\n # If type is arrival pass RESERVATION_START_TIME as tup else RESERVATION_END_TIME as tup\n if type == 'arrival':\n tup = RESERVATION_START_TIME\n else:\n tup = RESERVATION_END_TIME\n\n __date = datetime.datetime(__date.year, __date.month, __date.day,\n tup[0], tup[1], tup[2])\n\n return __date\n return None", "def parse_date(date_str, date_re):\n date_str_tidy = date_str.replace('-', '')\n date_str_tidy = date_str_tidy.replace(' ', '')\n match = re.search(date_re, date_str_tidy)\n if match:\n year = match.group(3)\n if len(year) == 2:\n year = '19' + year\n try:\n date_utc = datetime.datetime(\n int(year), int(match.group(1)), int(match.group(2)),\n 0, 0, tzinfo=datetime.timezone.utc).isoformat()\n except:\n return (date_str, True)\n else:\n return (date_utc, False)\n else:\n return (date_str, True)" ]
[ "0.65887856", "0.63256896", "0.6298983", "0.622432", "0.62181807", "0.61893153", "0.614591", "0.61219233", "0.61208475", "0.6105145", "0.6070044", "0.6062799", "0.6033025", "0.6019465", "0.59857774", "0.59718645", "0.59547275", "0.593682", "0.5933253", "0.59316033", "0.59314114", "0.5896675", "0.58935726", "0.5860423", "0.5839755", "0.58291435", "0.5822639", "0.58221525", "0.5794308", "0.5743353", "0.57371545", "0.5735285", "0.5731587", "0.5716611", "0.5713112", "0.5712573", "0.57054543", "0.5703638", "0.56997603", "0.56946725", "0.56795967", "0.56703275", "0.5650576", "0.5643405", "0.5643219", "0.563982", "0.56369525", "0.56351984", "0.56244195", "0.56244195", "0.56220514", "0.5620579", "0.56175095", "0.5594699", "0.55785125", "0.5575073", "0.55748665", "0.5571522", "0.5569211", "0.5546154", "0.5538901", "0.5534016", "0.55224633", "0.5506429", "0.5506318", "0.54978484", "0.54976", "0.54951733", "0.5495171", "0.54906917", "0.54875386", "0.5469383", "0.54670966", "0.5464588", "0.54572123", "0.5456523", "0.5455427", "0.54448426", "0.5427592", "0.5415271", "0.5413352", "0.54098475", "0.5391111", "0.5384096", "0.5381736", "0.5372617", "0.5371137", "0.5371107", "0.5370288", "0.5361982", "0.53613615", "0.53594816", "0.53579485", "0.5354232", "0.53524566", "0.5350981", "0.534738", "0.53463054", "0.5345335", "0.533827" ]
0.61388487
7
r"""Pass the inputs (and mask) through the decoder layer.
def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None):
    # type: (Tensor, Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor]) -> Tensor
    #tgt=SpeechFeature
    #memory=Text Feature
    torch.cuda.empty_cache()
    tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
                          key_padding_mask=tgt_key_padding_mask)[0]
    tgt = tgt + self.dropout1(tgt2)
    tgt = self.norm1(tgt)
    tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,
                               key_padding_mask=memory_key_padding_mask)[0]
    tgt = tgt + self.dropout2(tgt2)
    tgt = self.norm2(tgt)
    tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
    tgt = tgt + self.dropout3(tgt2)
    tgt = self.norm3(tgt)
    #tgt=tgt.cpu()
    tgt2 = tgt2.cpu()
    return tgt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decoder(self, embedded_inputs, decoder_input0,\n decoder_hidden0, encoder_outputs):\n pass", "def forward(self, src, tgt, enc_mask=None, dec_mask=None):\n m, enc_embed = self.encoder(src, enc_mask)\n # mask = mask.flatten(1)\n out, dec_embed = self.decoder(tgt, m, enc_embed, dec_mask) \n return out, m, enc_embed, dec_embed", "def forward(self, *args):\r\n enc_src, src_mask, src_inp = self.unified_encoder(*args)\r\n batch_size, _, hid_dim = src_inp.shape\r\n device = src_inp.device\r\n # enc_src = [batch size, src len, hid dim]\r\n # src_mask = [batch size, src len]\r\n\r\n trg_inp = torch.cat((torch.zeros((batch_size, 1, hid_dim), device=device), src_inp[:, :-1, :]), 1)\r\n decoder_output, attention = self.decoder(trg_inp, enc_src, src_mask)\r\n\r\n # decoder_output = [batch size, trg len, hid dim]\r\n # attention = [batch size, n heads, trg len, src len]\r\n\r\n outputs = self.output_layer(decoder_output)\r\n\r\n return outputs, attention", "def forward( self, target, encoder_output, attention_mask ):\n x = target\n for layer in self.decoder_layers:\n x = layer( x, encoder_output, attention_mask )\n return x", "def __call__(self, inputs: tf.Tensor, outputs: tf.Tensor,\n out_mask: tf.Tensor,in_pad_mask: tf.Tensor=None,\n training: bool=False):\n encoder_outputs, enc_attention = self.encoder(\n inputs, in_pad_mask, training\n )\n # shape=(batch_size, inputs_seq_len, d_model)\n\n decoder_outputs, dec_self_attention, dec_enc_attention = self.decoder(\n outputs, encoder_outputs, out_mask, in_pad_mask, training\n )\n # shape=(batch_size, outputs_seq_len, d_model)\n\n attentions = {'encoder': enc_attention,\n 'decoder_self': dec_self_attention,\n 'decoder_encoder': dec_enc_attention}\n\n return decoder_outputs, attentions", "def decode(self):\n decoder_input = Input(shape=self.input_decoder_shape, batch_shape=self.input_batch_decoder_shape)\n ppg_input = Input(shape=self.input_ppg_shape, batch_shape=self.input_batch_ppg_shape)\n\n if self.hparams.Masking is True:\n mask_decoder_input = Masking(mask_value=0)(decoder_input)\n mask_ppg_input = Masking(mask_value=0)(ppg_input)\n prenet_output = self.PreNet(mask_decoder_input)\n encoder_input = self.Encoder(mask_ppg_input)\n decoder_mask = None\n else:\n decoder_mask = Masking(mask_value=0).compute_mask(ppg_input)\n prenet_output = self.PreNet(decoder_input)\n encoder_input = self.Encoder(ppg_input, decoder_mask)\n\n rnn_output = Concatenate(axis=-1)([prenet_output, encoder_input])\n # mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n # diff_mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n for i in range(self.hparams.Tacotron_decoder_layers):\n rnn_output = self.Decoder_LSTM[i](rnn_output, mask=decoder_mask)\n\n # feed by self.states is unhelpful in training, since we don't stop rnn during epochs\n # but it is important in generating since each fit states will be set to zeros.!!!!!!\n rnn_output = Concatenate(axis=-1)([rnn_output, encoder_input])\n decoder_output = self.Linear_projection(rnn_output)\n if self.hparams.Tacotron_postnet is True:\n residual_output = decoder_output\n for i in range(self.hparams.PostNet_layers):\n residual_output = self.PostNet_Conv1D[i](residual_output)\n residual_output = self.PostNet_BatchNorm[i](residual_output)\n residual_output = self.PostNet_dropout_list[i](residual_output)\n decoder_output = Add()([decoder_output, residual_output])\n return Model(inputs=[decoder_input, ppg_input], outputs=decoder_output)", 
"def decoder_model(self, input_shape, layers, filters, latent_dims):\n\n flat_dims = self.encoder.get_layer('Flatten_dims').output_shape\n pool_dims = self.encoder.get_layer('Flatten_dims').input_shape\n\n latent_inputs = Input(shape=(latent_dims,), name='z_sampling')\n latent_to_reshape = Dense(flat_dims[-1], activation='relu')(latent_inputs)\n reshape_to_up = Reshape(pool_dims[1:])(latent_to_reshape)\n \n l = [reshape_to_up]\n\n for i in range(0,layers):\n l.append(UpSampling2D(size=(2,2), data_format='channels_last',\n name='Upsample_'+str(i))(l[i*2]))\n l.append(Conv2D(filters[-i-1], (3,3), padding='same',\n data_format='channels_last', name='DeConv_'+str(i),\n activation='relu')(l[i*2+1]))\n\n l.append(Conv2D(1, (3,3), padding='same',\n data_format='channels_last', name='decoder_output',\n activation='sigmoid')(l[-1]))\n\n decoder = Model(latent_inputs, l[-1], name='decoder')\n\n decoder.summary()\n\n return decoder", "def forward(self, inputs, decode_len=None):\n\n batch_size = inputs.size(0)\n input_dim = inputs.size(1)\n assert input_dim == self.input_dim, 'input dim should be {:d} but now: {:d}'.format(self.input_dim, input_dim)\n\n sourceL = inputs.size(2)\n\n if self.embed_input:\n # repeat embeddings across batch_size\n # result is [batch_size x input_dim x embedding_dim]\n # TODO: repeat or expand?\n embedding = self.embedding.repeat(batch_size, 1, 1)\n embedded_inputs = []\n # result is [batch_size, 1, input_dim, sourceL]\n ips = inputs.unsqueeze(1)\n\n for i in range(sourceL):\n # [batch_size x 1 x input_dim] * [batch_size x input_dim x embedding_dim]\n # result is [batch_size, embedding_dim]\n embedded_inputs.append(torch.bmm(\n ips[:, :, :, i].float(),\n embedding).squeeze(1))\n\n # Result is [sourceL x batch_size x embedding_dim]\n embedded_inputs = torch.cat(embedded_inputs).view(\n sourceL,\n batch_size,\n embedding.size(2))\n else:\n embedded_inputs = inputs.permute(2, 0, 1)\n\n (encoder_hx, encoder_cx) = init_zero_hidden(self.hidden_dim, inputs.is_cuda)\n encoder_hx = encoder_hx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n encoder_cx = encoder_cx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n \n # encoder forward pass\n enc_h, (enc_h_t, enc_c_t) = self.encoder(embedded_inputs, (encoder_hx, encoder_cx))\n\n enc_h_linear = enc_h.view(-1, self.hidden_dim)\n # enc_h_linear_2d = enc_h_linear.view(self.hidden_dim, -1)\n enc_action_scores = self.EncodeScore(enc_h_linear)\n enc_action_scores = enc_action_scores.view(-1, batch_size).permute(1, 0)\n dec_init_state = (enc_h_t[-1], enc_c_t[-1])\n \n # repeat decoder_in_0 across batch\n decoder_input = self.decoder_in_0.unsqueeze(0).repeat(embedded_inputs.size(1), 1)\n\n (head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores), dec_hidden_t = self.decoder(decoder_input,\n embedded_inputs,\n dec_init_state,\n enc_h, max_len=decode_len)\n #TODO: added conversion to tensors\n head_pointer_probs = torch.stack(head_pointer_probs)\n head_pointer_probs = head_pointer_probs.permute(1, 0, 2)\n tail_pointer_probs = torch.stack(tail_pointer_probs)\n tail_pointer_probs = tail_pointer_probs.permute(1, 0, 2)\n cls_scores = torch.stack(cls_scores)\n cls_scores = cls_scores.permute(1, 0, 2)\n head_positions = torch.stack(head_positions)\n head_positions = head_positions.permute(1, 0)\n tail_positions = torch.stack(tail_positions)\n tail_positions = tail_positions.permute(1, 0)\n\n\n\n return head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, 
enc_action_scores", "def forward(self, x, src_states, src_mask, tgt_mask):\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))\n #print('decoder')\n #print(x.shape)\n x = self.sublayer[1](x, lambda x: self.src_attn(x, src_states, src_states, src_mask))\n #print(x.shape)\n return self.sublayer[2](x, self.feed_forward)", "def forward(self, src, mask):\n bs = src.shape[0]\n src = src.permute(2, 0, 1)\n m = src \n enc_embed = self.enc_embed.weight.unsqueeze(1).repeat(1, bs, 1)\n for layer in self.encoder_layers:\n m = layer(m,\n pos=enc_embed,\n src_mask = mask\n )\n return m.permute(1, 2, 0), enc_embed.permute(1, 2, 0)", "def forward(self, tgt, m, enc_embed, mask):\n bs = tgt.shape[0]\n enc_embed = enc_embed.permute(2, 0, 1)\n m = m.permute(2, 0, 1)\n tgt = tgt.permute(2, 0, 1)\n dec_embed = self.dec_embed.weight.unsqueeze(1).repeat(1, bs, 1)\n\n out = tgt\n for layer in self.decoder_layers:\n out = layer(out, m, \n pos=enc_embed,\n query_pos=dec_embed\n )\n \n return self.decoder_norm(out).permute(1, 2, 0), dec_embed.permute(1, 2, 0)", "def _decode(self, input_dict):\n encoder_outputs = input_dict['encoder_output']['outputs']\n enc_src_lengths = input_dict['encoder_output']['src_length']\n if self._mode == 'train':\n spec = (\n input_dict['target_tensors'][0]\n if 'target_tensors' in input_dict\n else None\n )\n spec_length = (\n input_dict['target_tensors'][1]\n if 'target_tensors' in input_dict\n else None\n )\n\n _batch_size = tf.shape(encoder_outputs)[0]\n\n training = self._mode == 'train'\n regularizer = self.params.get('regularizer', None)\n\n if self.params.get('enable_postnet', True):\n if 'postnet_conv_layers' not in self.params:\n raise ValueError(\n 'postnet_conv_layers must be passed from config file if postnet is'\n 'enabled'\n )\n\n num_audio_features = self._n_feats\n\n output_projection_layer = tf.layers.Dense(\n name='output_proj', units=num_audio_features, use_bias=True\n )\n stop_token_projection_layer = tf.layers.Dense(\n name='stop_token_proj', units=1, use_bias=True\n )\n\n prenet = None\n if self.params.get('enable_prenet', True):\n prenet = Prenet(\n self.params.get('prenet_units', 256),\n self.params.get('prenet_layers', 2),\n self.params.get('prenet_dropout', 0.5),\n self.params.get('prenet_enable_dropout', True),\n self.params.get('prenet_activation', tf.nn.relu),\n self.params['dtype'],\n )\n\n cell_params = {}\n cell_params['num_units'] = self.params['decoder_cell_units']\n decoder_cells = [\n single_cell(\n cell_class=self.params['decoder_cell_type'],\n cell_params=cell_params,\n zoneout_prob=self.params.get('zoneout_prob', 0.0),\n dp_output_keep_prob=1.0\n - self.params.get('dropout_prob', 0.1),\n training=training,\n )\n for _ in range(self.params['decoder_layers'])\n ]\n\n if self.params['attention_type'] is not None:\n attention_mechanism = self._build_attention(\n encoder_outputs,\n enc_src_lengths,\n self.params.get('attention_bias', False),\n )\n\n attention_cell = tf.contrib.rnn.MultiRNNCell(decoder_cells)\n\n attentive_cell = AttentionWrapper(\n cell=attention_cell,\n attention_mechanism=attention_mechanism,\n alignment_history=True,\n output_attention='both',\n )\n\n decoder_cell = attentive_cell\n\n if self.params['attention_type'] is None:\n decoder_cell = tf.contrib.rnn.MultiRNNCell(decoder_cells)\n\n if self._mode == 'train':\n train_and_not_sampling = True\n helper = TacotronTrainingHelper(\n inputs=spec,\n sequence_length=spec_length,\n prenet=None,\n model_dtype=self.params['dtype'],\n mask_decoder_sequence=self.params.get(\n 
'mask_decoder_sequence', True\n ),\n )\n elif self._mode == 'eval' or self._mode == 'infer':\n train_and_not_sampling = False\n inputs = tf.zeros(\n (_batch_size, 1, num_audio_features),\n dtype=self.params['dtype'],\n )\n helper = TacotronHelper(\n inputs=inputs,\n prenet=None,\n mask_decoder_sequence=self.params.get(\n 'mask_decoder_sequence', True\n ),\n )\n else:\n raise ValueError('Unknown mode for decoder: {}'.format(self._mode))\n decoder = TacotronDecoder(\n decoder_cell=decoder_cell,\n helper=helper,\n initial_decoder_state=decoder_cell.zero_state(\n _batch_size, self.params['dtype']\n ),\n attention_type=self.params['attention_type'],\n spec_layer=output_projection_layer,\n stop_token_layer=stop_token_projection_layer,\n prenet=prenet,\n dtype=self.params['dtype'],\n train=train_and_not_sampling,\n )\n\n if self._mode == 'train':\n maximum_iterations = tf.reduce_max(spec_length)\n else:\n maximum_iterations = tf.reduce_max(enc_src_lengths) * 10\n\n outputs, final_state, sequence_lengths = tf.contrib.seq2seq.dynamic_decode(\n # outputs, final_state, sequence_lengths, final_inputs = dynamic_decode(\n decoder=decoder,\n impute_finished=False,\n maximum_iterations=maximum_iterations,\n swap_memory=self.params.get('use_swap_memory', False),\n output_time_major=self.params.get('time_major', False),\n parallel_iterations=self.params.get('parallel_iterations', 32),\n )\n\n decoder_output = outputs.rnn_output\n stop_token_logits = outputs.stop_token_output\n\n with tf.variable_scope('decoder'):\n # If we are in train and doing sampling, we need to do the projections\n if train_and_not_sampling:\n decoder_spec_output = output_projection_layer(decoder_output)\n stop_token_logits = stop_token_projection_layer(\n decoder_spec_output\n )\n decoder_output = decoder_spec_output\n\n ## Add the post net ##\n if self.params.get('enable_postnet', True):\n dropout_keep_prob = self.params.get(\n 'postnet_keep_dropout_prob', 0.5\n )\n\n top_layer = decoder_output\n for i, conv_params in enumerate(self.params['postnet_conv_layers']):\n ch_out = conv_params['num_channels']\n kernel_size = conv_params['kernel_size'] # [time, freq]\n strides = conv_params['stride']\n padding = conv_params['padding']\n activation_fn = conv_params['activation_fn']\n\n if ch_out == -1:\n ch_out = self._n_feats\n\n top_layer = conv_bn_actv(\n layer_type='conv1d',\n name='conv{}'.format(i + 1),\n inputs=top_layer,\n filters=ch_out,\n kernel_size=kernel_size,\n activation_fn=activation_fn,\n strides=strides,\n padding=padding,\n regularizer=regularizer,\n training=training,\n data_format=self.params.get(\n 'postnet_data_format', 'channels_last'\n ),\n bn_momentum=self.params.get('postnet_bn_momentum', 0.1),\n bn_epsilon=self.params.get('postnet_bn_epsilon', 1e-5),\n )\n top_layer = tf.layers.dropout(\n top_layer,\n rate=1.0 - dropout_keep_prob,\n training=training,\n )\n\n else:\n top_layer = tf.zeros(\n [\n _batch_size,\n maximum_iterations,\n outputs.rnn_output.get_shape()[-1],\n ],\n dtype=self.params['dtype'],\n )\n\n if regularizer and training:\n vars_to_regularize = []\n vars_to_regularize += attentive_cell.trainable_variables\n vars_to_regularize += (\n attention_mechanism.memory_layer.trainable_variables\n )\n vars_to_regularize += output_projection_layer.trainable_variables\n vars_to_regularize += (\n stop_token_projection_layer.trainable_variables\n )\n\n for weights in vars_to_regularize:\n if 'bias' not in weights.name:\n # print(\"Added regularizer to {}\".format(weights.name))\n if weights.dtype.base_dtype == 
tf.float16:\n tf.add_to_collection(\n 'REGULARIZATION_FUNCTIONS', (weights, regularizer)\n )\n else:\n tf.add_to_collection(\n ops.GraphKeys.REGULARIZATION_LOSSES,\n regularizer(weights),\n )\n\n if self.params.get('enable_prenet', True):\n prenet.add_regularization(regularizer)\n\n if self.params['attention_type'] is not None:\n alignments = tf.transpose(\n final_state.alignment_history.stack(), [1, 2, 0]\n )\n else:\n alignments = tf.zeros([_batch_size, _batch_size, _batch_size])\n\n spectrogram_prediction = decoder_output + top_layer\n\n mag_spec_prediction = tf.zeros([_batch_size, _batch_size, _batch_size])\n\n stop_token_prediction = tf.sigmoid(stop_token_logits)\n outputs = [\n decoder_output,\n spectrogram_prediction,\n alignments,\n stop_token_prediction,\n sequence_lengths,\n mag_spec_prediction,\n ]\n\n return {'outputs': outputs, 'stop_token_prediction': stop_token_logits}", "def forward(self, inputs_encoder, inputs_decoder):\n states_encoder = self.encoder(inputs_encoder)\n outputs_decoder, states_decoder = self.decoder(inputs_decoder, states_encoder)\n return outputs_decoder, states_decoder", "def _define_decoder(self):\n self.decoder = nn.Sequential(nn.Linear(self.encoding_shape, 256), # B, 256\n View((-1, 256, 1, 1)), # B, 256, 1, 1\n nn.SELU(),\n nn.ConvTranspose2d(256, 64, 4), # B, 64, 4, 4\n nn.SELU(),\n nn.ConvTranspose2d(64, 64, 4, 2, 1), # B, 64, 8, 8\n nn.SELU(),\n nn.ConvTranspose2d(64, 32, 4, 2, 1), # B, 32, 16, 16\n nn.SELU(),\n nn.ConvTranspose2d(32, 32, 4, 2, 1), # B, 32, 32, 32\n nn.SELU(),\n nn.ConvTranspose2d(32, 3, 4, 2, 1), # B, nc, 64, 64\n nn.ReLU()\n )", "def __call__(self,\n encoder_input_tokens,\n decoder_input_tokens,\n decoder_target_tokens,\n encoder_segment_ids=None,\n decoder_segment_ids=None,\n encoder_positions=None,\n decoder_positions=None,\n *,\n enable_dropout: bool = True,\n decode: bool = False):\n encoded = self.encode(\n encoder_input_tokens,\n encoder_segment_ids=encoder_segment_ids,\n enable_dropout=enable_dropout)\n\n return self.decode(\n encoded,\n encoder_input_tokens, # only used for masks\n decoder_input_tokens,\n decoder_target_tokens,\n encoder_segment_ids=encoder_segment_ids,\n decoder_segment_ids=decoder_segment_ids,\n decoder_positions=decoder_positions,\n enable_dropout=enable_dropout,\n decode=decode)", "def _define_decoder(self):\n self.decoder = nn.Sequential(nn.Linear(self.encoding_shape, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 64*64*3, bias=False), nn.ReLU(),\n View((-1, 3, 64, 64)),\n )", "def _decoder(self, inputs, z_dimension, mcd):\n \n latent_inputs = Input(shape=(z_dimension,), name=\"z_sampling\")\n x = latent_inputs\n x = Dense(\n self.hidden_size // 4,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = Dropout(self.dropout_probability)(x, training=mcd)\n\n x = Dense(\n self.hidden_size // 3,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = Dropout(self.dropout_probability)(x, training=mcd)\n\n x = Dense(\n self.hidden_size // 2,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = Dropout(self.dropout_probability)(x, training=mcd)\n\n x = Dense(\n self.hidden_size,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = Dropout(self.dropout_probability)(x, training=mcd)\n outputs = Dense(\n self.n_dims,\n 
activation=self.output_activation,\n kernel_initializer=self.weight_init,\n )(x)\n \n self.decoder = Model(latent_inputs, outputs, name=\"decoder\")\n \n outputs = self.decoder(self.encoder(inputs)[0])\n \n return self.decoder, outputs", "def call(self, inputs, cache=None, decode_loop_step=None):\n decoder_inputs = inputs[\"decoder_inputs\"]\n encoder_outputs = inputs[\"encoder_outputs\"]\n self_attention_mask = inputs[\"self_attention_mask\"]\n attention_mask = inputs[\"attention_mask\"]\n decoder_shape = tf_utils.get_shape_list(decoder_inputs, expected_rank=3)\n batch_size = decoder_shape[0]\n decoder_length = decoder_shape[1]\n\n def _to_bert_self_attention_mask(matrix):\n \"\"\"[1, 1, target_len, target_len] -> [bs, target_len, target_len].\"\"\"\n matrix = tf.squeeze(matrix, axis=[1])\n matrix = tf.tile(matrix, [batch_size, 1, 1])\n return matrix\n\n def _to_bert_encdec_attention_mask(matrix):\n \"\"\"[bs, 1, 1, input_len] -> [bs, target_len, input_len].\"\"\"\n if self.multi_channel_cross_attention:\n matrix = tf.expand_dims(matrix, axis=2)\n matrix = tf.tile(matrix, [1, 1, decoder_length, 1])\n else:\n matrix = tf.squeeze(matrix, axis=[1])\n matrix = tf.tile(matrix, [1, decoder_length, 1])\n return matrix\n\n attention_mask = _to_bert_encdec_attention_mask(attention_mask)\n self_attention_mask = _to_bert_self_attention_mask(self_attention_mask)\n\n output_tensor = decoder_inputs\n for layer_idx in range(self.num_hidden_layers):\n if self.attend_to_last_layer:\n memory = encoder_outputs[-1]\n else:\n memory = encoder_outputs[layer_idx]\n if self.multi_channel_cross_attention:\n transformer_inputs = [\n output_tensor, memory, attention_mask, self_attention_mask,\n inputs[\"doc_attention_probs\"]\n ]\n else:\n transformer_inputs = [\n output_tensor, memory, attention_mask, self_attention_mask\n ]\n # Gets the cache for decoding.\n if cache is None:\n output_tensor, _ = self.layers[layer_idx](transformer_inputs)\n else:\n cache_layer_idx = str(layer_idx)\n output_tensor, cache[cache_layer_idx] = self.layers[layer_idx](\n transformer_inputs,\n cache=cache[cache_layer_idx],\n decode_loop_step=decode_loop_step)\n return output_tensor, cache", "def call(self, inputs, target, training, encoder_mask,\n look_ahead_mask, decoder_mask):\n enc_output = self.encoder(inputs, training, encoder_mask)\n\n dec_output, attention = self.decoder(target, enc_output, training,\n look_ahead_mask, decoder_mask)\n\n final_output = self.linear(dec_output)\n\n return final_output, attention", "def decode(self, decoder_input, sampler_output):\n\n self.attention_hidden, self.attention_cell = self.attention_lstm(\n decoder_input, (self.attention_hidden, self.attention_cell))\n self.attention_hidden = F.dropout(\n self.attention_hidden, self.p_attention_dropout, self.training)\n\n self.decoder_hidden, self.decoder_cell = self.decoder_lstm(\n self.attention_hidden, (self.decoder_hidden, self.decoder_cell))\n self.decoder_hidden = F.dropout(\n self.decoder_hidden, self.p_decoder_dropout, self.training)\n\n # print(self.decoder_hidden.size())\n # print(self.decoder_hidden.size(), sampler_output.size())\n proj_input = torch.cat(\n (self.decoder_hidden, sampler_output), 1) # [B, 1024 + 1280]\n\n decoder_output = self.linear_projection(proj_input)\n\n return decoder_output", "def forward(self, *args):\r\n _, (hn, cn) = self.enc(*args)\r\n\r\n device = hn.device\r\n seq_cont_data = args[1]\r\n seq_cat_data = args[0]\r\n batch_size = seq_cont_data.shape[0]\r\n\r\n decoder_input_cont = torch.cat((torch.zeros(\r\n batch_size, 
1, seq_cont_data.shape[2], device=device),\r\n seq_cont_data[:, self.decoder_start:self.seq_len-1, :]), 1)\r\n decoder_input_cat = torch.cat((torch.zeros(\r\n batch_size, 1, seq_cat_data.shape[2], device=device),\r\n seq_cat_data[:, self.decoder_start:self.seq_len-1, :]), 1)\r\n decoder_out_cont, decoder_out_cat, (hn, cn) = self.dec(decoder_input_cont, decoder_input_cat, hidden=(hn, cn))\r\n\r\n return decoder_out_cont, decoder_out_cat", "def __call__(self, encoder_inputs, attention_bias, inputs_padding):\n for n, layer in enumerate(self.layers):\n # Run inputs through the sublayers.\n self_attention_layer = layer[0]\n feed_forward_network = layer[1]\n\n with tf.variable_scope(\"encoder_layer_%d\" % n):\n with tf.variable_scope(\"self_attention\"):\n encoder_inputs = self_attention_layer(encoder_inputs, attention_bias)\n with tf.variable_scope(\"ffn\"):\n encoder_inputs = feed_forward_network(encoder_inputs, inputs_padding)\n\n with tf.variable_scope(\"encoder_output\"):\n output = self.output_normalization(encoder_inputs)\n\n return output # self.output_normalization(encoder_inputs)", "def forward(self, inputs):\n #NOTE: Already merge axis 0(batches) and axis 1(channels) before extracting feature phase,\n # please refer to paddlevideo/modeling/framework/recognizers/recognizer2d.py#L27\n #y = paddle.reshape(\n # inputs, [-1, inputs.shape[2], inputs.shape[3], inputs.shape[4]])\n\n ####ResNet-C: use three 3x3 conv, replace, one 7x7 conv\n y = self.conv1_1(inputs)\n y = self.conv1_2(y)\n y = self.conv1_3(y)\n\n y = self.pool2D_max(y)\n for block in self.block_list:\n y = block(y)\n return y", "def forward(self, x, mask):\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n #print('encoder')\n #print(x.shape)\n return self.sublayer[1](x, self.feed_forward)", "def _add_input_decoder(self, inputs, seq_len, enc_fw, enc_bw):\n with tf.variable_scope(\"decoder\"):\n cell_fw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n ((fw_states, bw_states), (final_fw, final_bw)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True, initial_state_fw=enc_fw, initial_state_bw=enc_bw)\n\n return fw_states, bw_states", "def forward(self, x, mask):\n # encoder process\n b, n, h, w = x.size()\n if h != 256 or w != 256: # resize the image to fixed size for inpainting\n x = nn.functional.interpolate(x, (256, 256))\n mask = torch.nn.functional.interpolate(mask, (256, 256))\n features, f_m_c = self.net_E(x, mask=mask)\n # decoder process\n f_in, f_e, scale_mask = self.get_G_inputs(features, f_m_c, mask)\n g_img, attn_f = self.net_G(f_in, f_e, scale_mask)\n if h != 256 or w != 256:\n for i in range(len(g_img)):\n g_img[i] = nn.functional.interpolate(g_img[i], (h, w))\n return g_img", "def decoder(self, in_channels, out_channels, kernel_size, stride=1, padding=1, output_padding=0, batchnorm=True, bias=False, n_convs=1):\n mods = []\n out = in_channels\n\n for n in range(n_convs):\n if n == n_convs - 1:\n out = out_channels \n mods.append(nn.Conv3d(in_channels, out, kernel_size, stride=stride,\n padding=padding, bias=bias))\n # TODO: Check batchnorm?\n if batchnorm:\n mods.append(nn.BatchNorm3d(out))\n mods.append(nn.ReLU())\n\n layer = nn.Sequential(*mods)\n\n return layer", "def __init__(self,\r\n unified_encoder,\r\n decoder,\r\n output_layer):\r\n super().__init__()\r\n\r\n 
self.unified_encoder = unified_encoder\r\n self.decoder = decoder\r\n self.output_layer = output_layer", "def call(self,\n decoder_inputs,\n encoder_outputs,\n decoder_self_attention_bias,\n attention_bias,\n training,\n cache=None):\n # Run values\n outputs = self._transformer_decoder(\n decoder_inputs,\n encoder_outputs,\n decoder_self_attention_bias,\n attention_bias,\n training=training,\n cache=cache)\n return outputs", "def forward(\r\n self,\r\n trg_embed: Tensor = None,\r\n encoder_output: Tensor = None,\r\n src_mask: Tensor = None,\r\n trg_mask: Tensor = None,\r\n **kwargs\r\n ):\r\n assert trg_mask is not None, \"trg_mask required for Transformer\"\r\n\r\n x = self.pe(trg_embed) # add position encoding to word embedding\r\n x = self.emb_dropout(x)\r\n\r\n trg_mask = trg_mask & subsequent_mask(trg_embed.size(1)).type_as(trg_mask)\r\n\r\n\r\n for layer in self.layers:\r\n x = layer(x=x, memory=encoder_output, src_mask=src_mask, trg_mask=trg_mask)\r\n\r\n x = self.layer_norm(x)\r\n output = self.output_layer(x)\r\n\r\n return output", "def __init__(self, # noqa: R0913\n hid_dim,\n n_layers,\n n_heads,\n pf_dim,\n dropout,\n pos_embedding):\n super().__init__()\n\n self.pos_embedding = pos_embedding\n\n self.layers = nn.ModuleList([TransformerDecoderLayer(hid_dim,\n n_heads,\n pf_dim,\n dropout)\n for _ in range(n_layers)])\n\n self.dropout = nn.Dropout(dropout)\n\n self.register_buffer('scale', torch.sqrt(torch.FloatTensor([hid_dim])))", "def __call__(self,\n input_data,\n input_mask):\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n input_block = input_data\n input_block_mask = input_mask\n \n for block_layer in self.block_layer_list:\n input_block, input_block_mask = block_layer(input_block, input_block_mask)\n \n output_block = input_block\n output_mask = input_block_mask\n \n return output_block, output_mask", "def call_(self, inputs, training=None, mask=None, index=None):\n return self.decode_(self.encode_(inputs))", "def __call__(self,\n input_char,\n input_char_mask):\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n input_char_embedding_mask = tf.expand_dims(input_char_mask, axis=-1)\n input_char_embedding = self.embedding_layer(input_char)\n \n (input_char_dropout,\n input_char_dropout_mask) = self.dropout_layer(input_char_embedding, input_char_embedding_mask)\n \n (input_char_conv,\n input_char_conv_mask) = self.conv_layer(input_char_dropout, input_char_dropout_mask)\n \n (input_char_pool,\n input_char_pool_mask) = self.pooling_layer(input_char_conv, input_char_conv_mask)\n \n input_char_feat = input_char_pool\n input_char_feat_mask = input_char_pool_mask\n \n return input_char_feat, input_char_feat_mask", "def long_answer_prepare_decoder(inputs, targets, hparams):\n decoder_input = tf.concat([\n length_embedding(targets, hparams), inputs,\n common_layers.shift_left_3d(targets)], 1)\n if hparams.pos == \"timing\":\n decoder_input = common_attention.add_timing_signal_1d(decoder_input)\n return decoder_input", "def call(self,\n inputs,\n cache=None,\n decode_loop_step=None,\n padded_decode=False):\n attention_bias = inputs[\"attention_bias\"]\n target_ids = inputs[\"target_ids\"]\n all_encoder_outputs = inputs[\"all_encoder_outputs\"]\n self_attention_bias = inputs[\"self_attention_bias\"]\n if not isinstance(all_encoder_outputs, list):\n all_encoder_outputs = [all_encoder_outputs]\n\n target_embeds = self.embedding_lookup(target_ids)\n if decode_loop_step is None:\n target_embeds = self.embedding_postprocessor(target_embeds)\n else:\n target_embeds = 
self._decoding_step_time_signal(target_embeds,\n decode_loop_step)\n decoder_inputs = dict(\n decoder_inputs=target_embeds,\n encoder_outputs=all_encoder_outputs,\n self_attention_mask=self_attention_bias,\n attention_mask=attention_bias)\n if self.multi_channel_cross_attention:\n decoder_inputs[\"doc_attention_probs\"] = inputs[\"doc_attention_probs\"]\n decode_outputs, cache = self.decoder(\n decoder_inputs, cache, decode_loop_step if padded_decode else None)\n return decode_outputs", "def __init__(self,\n encoder: nn.Module,\n decoder: nn.Module,\n **kwargs):\n super().__init__(**kwargs)\n\n # set encoder and decoder\n self.encoder = encoder\n self.decoder = decoder\n\n # send to device\n self.to(self.device)", "def _prepare_bart_decoder_inputs(\r\n config, input_ids, decoder_input_ids=None, decoder_padding_mask=None, causal_mask_dtype=torch.float32\r\n):\r\n pad_token_id = config.pad_token_id\r\n if decoder_input_ids is None:\r\n decoder_input_ids = shift_tokens_right(input_ids, pad_token_id)\r\n bsz, tgt_len = decoder_input_ids.size()\r\n if decoder_padding_mask is None:\r\n decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id)\r\n else:\r\n decoder_padding_mask = invert_mask(decoder_padding_mask)\r\n if decoder_padding_mask is not None and decoder_padding_mask.shape[1] > 1:\r\n # never mask leading token, even if it is pad\r\n decoder_padding_mask[:, 0] = decoder_padding_mask[:, 1]\r\n tmp = fill_with_neg_inf(torch.zeros(tgt_len, tgt_len))\r\n mask = torch.arange(tmp.size(-1))\r\n tmp.masked_fill_(mask < (mask + 1).view(tmp.size(-1), 1), 0)\r\n causal_mask = tmp.to(dtype=causal_mask_dtype, device=decoder_input_ids.device)\r\n return decoder_input_ids, decoder_padding_mask, causal_mask", "def forward(\r\n self,\r\n input_ids,\r\n encoder_hidden_states,\r\n encoder_padding_mask,\r\n decoder_padding_mask,\r\n decoder_causal_mask,\r\n past_key_values=None,\r\n use_cache=False,\r\n output_attentions=False,\r\n output_hidden_states=False,\r\n return_dict=False,\r\n **unused,\r\n ):\r\n\r\n if \"decoder_cached_states\" in unused:\r\n warnings.warn(\r\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\r\n FutureWarning,\r\n )\r\n past_key_values = unused.pop(\"decoder_cached_states\")\r\n if \"decoder_past_key_values\" in unused:\r\n warnings.warn(\r\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\r\n FutureWarning,\r\n )\r\n past_key_values = unused.pop(\"decoder_past_key_values\")\r\n\r\n # check attention mask and invert\r\n if encoder_padding_mask is not None:\r\n encoder_padding_mask = invert_mask(encoder_padding_mask)\r\n\r\n # embed positions\r\n positions = self.embed_positions(input_ids, use_cache=use_cache)\r\n\r\n if use_cache:\r\n input_ids = input_ids[:, -1:]\r\n positions = positions[:, -1:]\r\n\r\n x = self.embed_tokens(input_ids) * self.embed_scale\r\n if self.do_blenderbot_90_layernorm:\r\n x = self.layernorm_embedding(x)\r\n x += positions\r\n else:\r\n x += positions\r\n x = self.layernorm_embedding(x)\r\n\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n\r\n # Convert to Bart output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\r\n x = x.transpose(0, 1)\r\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\r\n\r\n # decoder layers\r\n all_hidden_states = () if output_hidden_states else None\r\n all_self_attns = () if output_attentions else None\r\n 
enc_dec_all_attn = () if output_attentions else None\r\n next_decoder_cache = []\r\n for idx, decoder_layer in enumerate(self.layers):\r\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\r\n if output_hidden_states:\r\n all_hidden_states += (x,)\r\n dropout_probability = random.uniform(0, 1)\r\n if self.training and (dropout_probability < self.layerdrop):\r\n continue\r\n\r\n layer_state = past_key_values[idx] if past_key_values is not None else None\r\n\r\n #isidora - in comment\r\n \"\"\"\r\n x, layer_self_attn, layer_past,_ = decoder_layer(\r\n x,\r\n encoder_hidden_states,\r\n encoder_attn_mask=encoder_padding_mask,\r\n decoder_padding_mask=decoder_padding_mask,\r\n layer_state=layer_state,\r\n causal_mask=decoder_causal_mask,\r\n output_attentions=output_attentions,\r\n )\r\n \"\"\"\r\n\r\n #isidora - start - replace _ with enc_dec_attn to get the encoder-decoder attn weights\r\n x, layer_self_attn, layer_past, enc_dec_attn = decoder_layer(\r\n x,\r\n encoder_hidden_states,\r\n encoder_attn_mask=encoder_padding_mask,\r\n decoder_padding_mask=decoder_padding_mask,\r\n layer_state=layer_state,\r\n causal_mask=decoder_causal_mask,\r\n output_attentions=output_attentions,\r\n )\r\n #isidora - end\r\n\r\n\r\n if use_cache:\r\n next_decoder_cache.append(layer_past.copy())\r\n\r\n if output_attentions:\r\n all_self_attns += (layer_self_attn,)\r\n enc_dec_all_attn += (enc_dec_attn,)\r\n\r\n if self.layer_norm: # if config.add_final_layer_norm (mBART)\r\n x = self.layer_norm(x)\r\n\r\n # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\r\n if output_hidden_states:\r\n all_hidden_states = tuple(hidden_state.transpose(0, 1) for hidden_state in all_hidden_states)\r\n x = x.transpose(0, 1)\r\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\r\n\r\n next_cache = next_decoder_cache if use_cache else None\r\n\r\n #isidora - start - return enc_dec_all_attn instead of decoder outputs\r\n return enc_dec_all_attn\r\n #isidora - end\r\n\r\n #isidora - in comment\r\n \"\"\"\r\n if not return_dict:\r\n return tuple(v for v in [x, next_cache, all_hidden_states, all_self_attns] if v is not None)\r\n return BaseModelOutputWithPast(\r\n last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns\r\n )\r\n \"\"\"", "def __init__(self,\n size: int = 0,\n bb_size: int = 64,\n ff_size: int = 0,\n num_heads: int = 0,\n dropout: float = 0.1):\n super(CustomTransformerDecoderLayer, self).__init__()\n self.size = size\n\n self.trg_trg_att = ContMultiHeadedAttention(num_heads, bb_size, bb_size,\n dropout=dropout)\n self.src_trg_att = ContMultiHeadedAttention(num_heads, size, size,\n dropout=dropout)\n\n self.feed_forward_h1 = PositionwiseFeedForward(bb_size, ff_size=ff_size)\n self.feed_forward_h2 = PositionwiseFeedForward(size, ff_size=ff_size)\n\n self.x_layer_norm = nn.LayerNorm(size, eps=1e-6)\n self.spa_layer_norm = nn.LayerNorm(bb_size, eps=1e-6)\n\n self.dropout = nn.Dropout(dropout)", "def inference_call(self, inputs):\n self.eval()\n\n with torch.no_grad():\n mid, downsampling_features = self.encoder(inputs)\n hd = self.decode_branch(mid=mid, downsampling_features=downsampling_features, branch=0)\n pl = self.decode_branch(mid=mid, downsampling_features=downsampling_features, branch=1)\n return hd, pl", "def get_output_for(self, inputs, **kwargs):\n # Retrieve the layer input\n input = inputs[0]\n # Retrieve the mask when it is supplied\n mask = None\n hid_init = None\n cell_init = None\n 
if self.mask_incoming_index > 0:\n mask = inputs[self.mask_incoming_index]\n if self.hid_init_incoming_index > 0:\n hid_init = inputs[self.hid_init_incoming_index]\n if self.cell_init_incoming_index > 0:\n cell_init = inputs[self.cell_init_incoming_index]\n\n # Treat all dimensions after the second as flattened feature dimensions\n if input.ndim > 3:\n input = T.flatten(input, 3)\n\n # Because scan iterates over the first dimension we dimshuffle to\n # (n_time_steps, n_batch, n_features)\n input = input.dimshuffle(1, 0, 2)\n seq_len, num_batch, _ = input.shape\n\n # Stack input weight matrices into a (num_inputs, 4*num_units)\n # matrix, which speeds up computation\n W_in_stacked = T.concatenate(\n [self.W_in_to_ingate, self.W_in_to_forgetgate,\n self.W_in_to_cell, self.W_in_to_outgate], axis=1)\n\n # Same for hidden weight matrices\n W_hid_stacked = T.concatenate(\n [self.W_hid_to_ingate, self.W_hid_to_forgetgate,\n self.W_hid_to_cell, self.W_hid_to_outgate], axis=1)\n\n # Stack biases into a (4*num_units) vector\n b_stacked = T.concatenate(\n [self.b_ingate, self.b_forgetgate,\n self.b_cell, self.b_outgate], axis=0)\n\n if self.precompute_input:\n # Because the input is given for all time steps, we can\n # precompute_input the inputs dot weight matrices before scanning.\n # W_in_stacked is (n_features, 4*num_units). input is then\n # (n_time_steps, n_batch, 4*num_units).\n input = T.dot(input, W_in_stacked) + b_stacked\n\n # At each call to scan, input_n will be (n_time_steps, 4*num_units).\n # We define a slicing function that extract the input to each LSTM gate\n def slice_w(x, n):\n return x[:, n*self.num_units:(n+1)*self.num_units]\n\n # Create single recurrent computation step function\n # input_n is the n'th vector of the input\n def step(input_n, cell_previous, hid_previous, *args):\n if not self.precompute_input:\n input_n = T.dot(input_n, W_in_stacked) + b_stacked\n\n # Calculate gates pre-activations and slice\n gates = input_n + T.dot(hid_previous, W_hid_stacked)\n\n # Clip gradients\n if self.grad_clipping:\n gates = theano.gradient.grad_clip(\n gates, -self.grad_clipping, self.grad_clipping)\n\n # Extract the pre-activation gate values\n ingate = slice_w(gates, 0)\n forgetgate = slice_w(gates, 1)\n cell_input = slice_w(gates, 2)\n outgate = slice_w(gates, 3)\n\n if self.peepholes:\n # Compute peephole connections\n ingate += cell_previous*self.W_cell_to_ingate\n forgetgate += cell_previous*self.W_cell_to_forgetgate\n\n # Apply nonlinearities\n ingate = self.nonlinearity_ingate(ingate)\n forgetgate = self.nonlinearity_forgetgate(forgetgate)\n cell_input = self.nonlinearity_cell(cell_input)\n\n # Compute new cell value\n cell = forgetgate*cell_previous + ingate*cell_input\n\n if self.peepholes:\n outgate += cell*self.W_cell_to_outgate\n outgate = self.nonlinearity_outgate(outgate)\n\n # Compute new hidden unit activation\n hid = outgate*self.nonlinearity(cell)\n return [cell, hid]\n\n def step_masked(input_n, mask_n, cell_previous, hid_previous, *args):\n cell, hid = step(input_n, cell_previous, hid_previous, *args)\n\n # Skip over any input with mask 0 by copying the previous\n # hidden state; proceed normally for any input with mask 1.\n not_mask = 1 - mask_n\n cell = cell*mask_n + cell_previous*not_mask\n hid = hid*mask_n + hid_previous*not_mask\n\n return [cell, hid]\n\n if mask is not None:\n # mask is given as (batch_size, seq_len). 
Because scan iterates\n # over first dimension, we dimshuffle to (seq_len, batch_size) and\n # add a broadcastable dimension\n mask = mask.dimshuffle(1, 0, 'x')\n sequences = [input, mask]\n step_fun = step_masked\n else:\n sequences = input\n step_fun = step\n\n ones = T.ones((num_batch, 1))\n if isinstance(self.cell_init, Layer):\n pass\n elif isinstance(self.cell_init, T.TensorVariable):\n cell_init = self.cell_init\n else:\n # Dot against a 1s vector to repeat to shape (num_batch, num_units)\n cell_init = T.dot(ones, self.cell_init)\n\n if isinstance(self.hid_init, Layer):\n pass\n elif isinstance(self.hid_init, T.TensorVariable):\n hid_init = self.hid_init\n else:\n # Dot against a 1s vector to repeat to shape (num_batch, num_units)\n hid_init = T.dot(ones, self.hid_init)\n\n # The hidden-to-hidden weight matrix is always used in step\n non_seqs = [W_hid_stacked]\n # The \"peephole\" weight matrices are only used when self.peepholes=True\n if self.peepholes:\n non_seqs += [self.W_cell_to_ingate,\n self.W_cell_to_forgetgate,\n self.W_cell_to_outgate]\n\n # When we aren't precomputing the input outside of scan, we need to\n # provide the input weights and biases to the step function\n if not self.precompute_input:\n non_seqs += [W_in_stacked, b_stacked]\n\n if self.unroll_scan:\n # Retrieve the dimensionality of the incoming layer\n input_shape = self.input_shapes[0]\n # Explicitly unroll the recurrence instead of using scan\n cell_out, hid_out = unroll_scan(\n fn=step_fun,\n sequences=sequences,\n outputs_info=[cell_init, hid_init],\n go_backwards=self.backwards,\n non_sequences=non_seqs,\n n_steps=input_shape[1])\n else:\n # Scan op iterates over first dimension of input and repeatedly\n # applies the step function\n cell_out, hid_out = theano.scan(\n fn=step_fun,\n sequences=sequences,\n outputs_info=[cell_init, hid_init],\n go_backwards=self.backwards,\n truncate_gradient=self.gradient_steps,\n non_sequences=non_seqs,\n strict=True)[0]\n\n # When it is requested that we only return the final sequence step,\n # we need to slice it out immediately after scan is applied\n if self.only_return_final:\n hid_out = hid_out[-1]\n cell_out = cell_out[-1]\n else:\n # dimshuffle back to (n_batch, n_time_steps, n_features))\n hid_out = hid_out.dimshuffle(1, 0, 2)\n cell_out = cell_out.dimshuffle(1, 0, 2)\n\n # if scan is backward reverse the output\n if self.backwards:\n hid_out = hid_out[:, ::-1]\n cell_out = cell_out[:, ::-1]\n\n return T.concatenate([cell_out, hid_out], axis=2)", "def forward(self, x):\n # Encode\n encode_block1 = self.conv_encoder1(x)\n encode_pool1 = self.max_pool_encoder1(encode_block1)\n encode_block2 = self.conv_encoder2(encode_pool1)\n encode_pool2 = self.max_pool_encoder2(encode_block2)\n encode_block3 = self.conv_encoder3(encode_pool2)\n encode_pool3 = self.max_pool_encoder3(encode_block3)\n encode_block4 = self.conv_encoder4(encode_pool3)\n encode_pool4 = self.max_pool_encoder4(encode_block4)\n\n # Transitional block\n middle_block = self.transitional_block(encode_pool4)\n\n # Decode\n decode_block4 = torch.cat((middle_block, encode_block4), 1)\n cat_layer3 = self.conv_decoder4(decode_block4)\n decode_block3 = torch.cat((cat_layer3, encode_block3), 1)\n cat_layer2 = self.conv_decoder3(decode_block3)\n decode_block2 = torch.cat((cat_layer2, encode_block2), 1)\n cat_layer1 = self.conv_decoder2(decode_block2)\n decode_block1 = torch.cat((cat_layer1, encode_block1), 1)\n final_layer = self.final_layer(decode_block1)\n return final_layer", "def __call__(self, inputs, 
*args, **kwargs):\n out = inputs\n for i, layer in enumerate(self.layers):\n if i == 0:\n out = layer(out, *args, **kwargs)\n else:\n out = layer(out)\n return out", "def decoder(input_layer, skip_layer, num_filters, size=(4, 4), dropout=True):\n # Initialize the weights\n init = tf.random_normal_initializer(0.0, 0.02)\n\n # Transpose Convolutional Layer\n decoder_layer = Conv2DTranspose(filters=num_filters, kernel_size=size, strides=(2, 2), padding='same',\n kernel_initializer=init, use_bias=False)(input_layer)\n\n # Instance normalization\n decoder_layer = tfa.layers.InstanceNormalization()(decoder_layer, training=True)\n\n # Dropout\n if dropout:\n decoder_layer = Dropout(0.5)(decoder_layer, training=True)\n\n # Merge with skip connection\n decoder_layer = Concatenate()([decoder_layer, skip_layer])\n\n # ReLU activation\n decoder_layer = Activation('relu')(decoder_layer)\n return decoder_layer", "def forward(self, inputs, mask=None):\n return self._reduce_obj(inputs, mask=mask)", "def forward(self, input, dec_hidden=None):\n ### YOUR CODE HERE for part 2b\n ### TODO - Implement the forward pass of the character decoder.\n # print(\"=====input.size\",input.size())\n char_embedded= self.decoderCharEmb(input)\n # print(\"=====char_embedded.size\",char_embedded.size())\n out, dec_hidden = self.charDecoder(char_embedded,dec_hidden)\n # print(\"=====out.size\",out.size()) #dimensions (seq_length, batch, hidden_size)\n \n out_batch_first = out.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\n o_proj = self.char_output_projection(out_batch_first)\n scores = o_proj.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\n return scores,dec_hidden\n ### END YOUR CODE ", "def build(self, unused_input_shapes):\n if self.embedding_lookup is None:\n self.embedding_lookup = layers.OnDeviceEmbedding(\n vocab_size=self.config.vocab_size,\n embedding_width=self.config.hidden_size,\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.config.initializer_range),\n name=\"target_embeddings\")\n self.embedding_postprocessor = EmbeddingPostprocessor(\n use_type_embeddings=False,\n use_position_embeddings=True,\n max_position_embeddings=self.config.max_position_embeddings,\n dropout_prob=self.config.hidden_dropout_prob,\n initializer=tf.keras.initializers.VarianceScaling(\n scale=self.config.initializer_gain,\n mode=\"fan_avg\",\n distribution=\"uniform\"),\n name=\"embedding_postprocessor\")\n # Decoder can use a different intermediate size.\n self.multi_channel_cross_attention = self.config.get(\n \"multi_channel_cross_attention\", False)\n self.decoder = TransformerDecoder(\n num_hidden_layers=self.config.num_decoder_layers,\n hidden_size=self.config.hidden_size,\n num_attention_heads=self.config.num_decoder_attn_heads,\n intermediate_size=self.config.decoder_intermediate_size,\n intermediate_activation=self.config.hidden_act,\n hidden_dropout_prob=self.config.hidden_dropout_prob,\n attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,\n initializer_range=self.config.initializer_range,\n multi_channel_cross_attention=self.multi_channel_cross_attention,\n name=\"decoder\")\n super(Decoder, self).build(unused_input_shapes)", "def encode_decode(self, batch_inputs: Tensor,\n batch_data_samples: SampleList) -> Tensor:\n pass", "def forward(self, decoder_output):\r\n\r\n # decoder_output = [batch size, trg len, hid dim]\r\n\r\n decoder_output_seq = decoder_output[:, :-1, :] if self.has_non_seq else decoder_output\r\n decoder_output_non_seq = decoder_output[:, -1, 
:] if self.has_non_seq else None\r\n device = decoder_output.device\r\n\r\n seq_cat = [output_layer(decoder_output_seq) for output_layer in self.linear_seq_cat]\r\n seq_cont = self.linear_seq_cont(\r\n decoder_output_seq) if self.linear_seq_cont else torch.empty((0, 0), device=device)\r\n non_seq_cat = [output_layer(decoder_output_non_seq) for output_layer in self.linear_non_seq_cat]\r\n non_seq_cont = self.linear_non_seq_cont(\r\n decoder_output_non_seq) if self.linear_non_seq_cont else torch.empty((0, 0), device=device)\r\n\r\n return seq_cat, seq_cont, non_seq_cat, non_seq_cont", "def _decode_infer(self, decoder, _encoder_output, features, labels):\r\n\r\n return decoder(_encoder_output, labels)", "def forward(self, src):\n\n # src = [batch size, src len, hid_dim]\n\n batch_size = src.shape[0]\n src_len = src.shape[1]\n device = src.device\n\n src_mask = self._make_src_mask(batch_size, src_len, device)\n\n # src_mask = [batch size, src len]\n\n pos = torch.arange(0, src_len).unsqueeze(0).repeat(batch_size, 1).to(device)\n\n # pos = [batch size, src len]\n\n src = self.dropout(src * self.scale + self.pos_embedding(pos))\n\n # src = [batch size, src len, hid dim]\n\n for layer in self.layers:\n src = layer(src, src_mask)\n\n # src = [batch size, src len, hid dim]\n # src_mask = [batch size, 1, 1, src len]\n\n return src, src_mask", "def deconv_layer(self, inputs, field_size, channels_size,\n initializer_type, name, act_func=tf.nn.relu):\n batch, height, width, in_channels = inputs.get_shape().as_list()\n #shape = tf.shape(inputs)\n assert in_channels == channels_size[0], (\n 'Number of input channels doe not match filter inputs channels.'\n )\n with tf.variable_scope(name):\n channels_size.reverse() # now [out_c, in_c]\n filter_size = field_size + channels_size\n bias_size = [channels_size[0]]\n\n if initializer_type:\n initializer = tf.contrib.layers.xavier_initializer()\n else:\n initializer = tf.truncated_normal_initializer(stddev=.1)\n\n weights = tf.get_variable('W', filter_size, initializer=initializer)\n biases = tf.get_variable(\n 'b', bias_size, initializer=tf.constant_initializer(.1))\n\n #target_shape_tensor = tf.stack([shape[0], height, width, channels_size[0]])\n conv = tf.nn.conv2d_transpose(\n inputs,\n weights,\n #target_shape_tensor,\n [batch, height, width, channels_size[0]],\n [1, 1, 1, 1],\n padding='SAME')\n conv_bias = tf.nn.bias_add(conv, biases)\n if act_func == None:\n output = conv_bias\n else:\n output = act_func(conv_bias)\n #set_shape does not accept tensor\n #output.set_shape([batch, height, width, channels_size[0]])\n #this sets first size to none. why? 
Not used.\n #output = tf.reshape(output, target_shape_tensor)\n\n return output", "def conv_decoder(encoder_output):\n namescope = 'conv_decoder'\n with tf.variable_scope(namescope):\n net = tf.layers.conv2d(encoder_output,\n filters=256,\n kernel_size=(1, 1),\n padding='same',\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.0005),\n activation=tf.nn.elu)\n net = tf.layers.conv2d(net,\n filters=C,\n kernel_size=(1, 1),\n padding='same',\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.0005),\n activation=None)\n return net", "def encode_input_for_decoder(x_tensor, inp_lens_tensor, model_input_emb: EmbeddingLayer, model_enc: RNNEncoder):\n input_emb = model_input_emb.forward(x_tensor)\n (enc_output_each_word, enc_context_mask, enc_final_states) = model_enc.forward(input_emb, inp_lens_tensor)\n enc_final_states_reshaped = (enc_final_states[0].unsqueeze(0), enc_final_states[1].unsqueeze(0))\n return (enc_output_each_word, enc_context_mask, enc_final_states_reshaped)", "def forward(self, src, trg, src_mask, trg_mask, src_lengths, trg_lengths):\n encoder_hidden, encoder_final = self.encode(src, src_mask, src_lengths)\n return self.decode(encoder_hidden, encoder_final, src_mask, trg, trg_mask)", "def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float]=[123.675, 116.28, 103.53],\n pixel_std: List[float]=[58.395, 57.12, 57.375], ) -> None:\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\n \"pixel_mean\",\n paddle.to_tensor(pixel_mean).reshape([-1, 1, 1]),\n persistable=False)\n self.register_buffer(\n \"pixel_std\",\n paddle.to_tensor(pixel_std).reshape([-1, 1, 1]),\n persistable=False)", "def __init__(self, input_mask):\n super().__init__()\n if isinstance(input_mask, torch.Tensor):\n self.register_buffer('input_mask', input_mask.float(), persistent=False)\n else:\n self.input_mask = input_mask", "def call(self, \n inputs, \n encoder_outputs, \n look_ahead_mask, \n padding_mask, \n training, \n cache=None):\n for i, layer in enumerate(self._stack):\n inputs = layer.call(inputs, \n encoder_outputs, \n look_ahead_mask, \n padding_mask, \n training, \n cache=cache['layer_%d' % i] \n if cache is not None else None)\n outputs = self._layernorm(inputs)\n return outputs", "def get_output_for(self, inputs, **kwargs):\n # Retrieve the layer input\n input = inputs[0]\n # Retrieve the mask when it is supplied\n mask = None\n hid_init = None\n if self.mask_incoming_index > 0:\n mask = inputs[self.mask_incoming_index]\n if self.hid_init_incoming_index > 0:\n hid_init = inputs[self.hid_init_incoming_index]\n\n # Input should be provided as (n_batch, n_time_steps, n_features)\n # but scan requires the iterable dimension to be first\n # So, we need to dimshuffle to (n_time_steps, n_batch, n_features)\n input = input.dimshuffle(1, 0, *range(2, input.ndim))\n seq_len, num_batch = input.shape[0], input.shape[1]\n\n # When we are not precomputing the input, we also need to pass the\n # input-to-hidden parameters to step\n non_seqs = L.get_all_params(self.input_to_hidden)\n\n # Create single recurrent computation step function\n def step(input_n, hid_previous, *args):\n hid_pre = L.get_output(\n self.input_to_hidden,{self.input_to_hidden_input : input_n,\n 
self.input_to_hidden_hidden : hid_previous}, **kwargs)\n\n # Clip gradients\n if self.grad_clipping:\n hid_pre = theano.gradient.grad_clip(\n hid_pre, -self.grad_clipping, self.grad_clipping)\n\n return hid_pre\n\n def step_masked(input_n, mask_n, hid_previous, *args):\n # Skip over any input with mask 0 by copying the previous\n # hidden state; proceed normally for any input with mask 1.\n hid = step(input_n, hid_previous, *args)\n hid_out = T.switch(mask_n, hid, hid_previous)\n return [hid_out]\n\n if mask is not None:\n mask = mask.dimshuffle(1, 0, 'x')\n sequences = [input, mask]\n step_fun = step_masked\n else:\n sequences = input\n step_fun = step\n\n if not isinstance(self.hid_init, L.Layer):\n # The code below simply repeats self.hid_init num_batch times in\n # its first dimension. Turns out using a dot product and a\n # dimshuffle is faster than T.repeat.\n dot_dims = (list(range(1, self.hid_init.ndim - 1)) +\n [0, self.hid_init.ndim - 1])\n hid_init = T.dot(T.ones((num_batch, 1)),\n self.hid_init.dimshuffle(dot_dims))\n\n if self.unroll_scan:\n # Retrieve the dimensionality of the incoming layer\n input_shape = self.input_shapes[0]\n # Explicitly unroll the recurrence instead of using scan\n hid_out = unroll_scan(\n fn=step_fun,\n sequences=sequences,\n outputs_info=[hid_init],\n go_backwards=self.backwards,\n non_sequences=non_seqs,\n n_steps=input_shape[1])[0]\n else:\n # Scan op iterates over first dimension of input and repeatedly\n # applies the step function\n hid_out = theano.scan(\n fn=step_fun,\n sequences=sequences,\n go_backwards=self.backwards,\n outputs_info=[hid_init],\n non_sequences=non_seqs,\n truncate_gradient=self.gradient_steps,\n strict=True)[0]\n\n # When it is requested that we only return the final sequence step,\n # we need to slice it out immediately after scan is applied\n if self.only_return_final:\n hid_out = hid_out[-1]\n else:\n # dimshuffle back to (n_batch, n_time_steps, n_features))\n hid_out = hid_out.dimshuffle(1, 0, *range(2, hid_out.ndim))\n\n # if scan is backward reverse the output\n if self.backwards:\n hid_out = hid_out[:, ::-1]\n\n return hid_out", "def make_decoder(self, latent_size: int, output_size: int) -> nn.Module:\n pass", "def encoder(self, inputs):\n pass", "def call(self, inputs, training=None, mask=None):\n # pylint: disable=arguments-differ\n raise NotImplementedError()", "def forward(self, memory, sampler_outputs, decoder_inputs):\n\n # print(decoder_inputs.size())\n # decoder_inputs = self.parse_decoder_inputs(decoder_inputs)\n sampler_outputs = sampler_outputs.transpose(0, 1) # [T, B, 1280]\n decoder_inputs = decoder_inputs.transpose(0, 1) # [T, B , Mel]\n decoder_inputs = self.prenet(decoder_inputs) # [T, B, 256]\n\n # [T, B, 1312 + 256]\n decoder_inputs = torch.cat((decoder_inputs, memory.transpose(0, 1)), 2)\n\n self.initialize_decoder_states(memory)\n\n mel_outputs = list()\n\n while len(mel_outputs) < decoder_inputs.size(0):\n decoder_input = decoder_inputs[len(mel_outputs)]\n sample_output = sampler_outputs[len(mel_outputs)]\n mel_output = self.decode(decoder_input, sample_output)\n # print(mel_output.size())\n mel_outputs += [mel_output.squeeze(1)]\n # gate_outputs += [gate_output.squeeze(1)]\n # alignments += [attention_weights]\n\n mel_outputs = self.parse_decoder_outputs(mel_outputs)\n\n return mel_outputs", "def build(self, unused_input_shapes):\n self.layers = []\n for i in range(self.num_hidden_layers):\n self.layers.append(\n TransformerDecoderBlock(\n hidden_size=self.hidden_size,\n 
num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n intermediate_activation=self.intermediate_activation,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n multi_channel_cross_attention=self.multi_channel_cross_attention,\n name=(\"layer_%d\" % i)))\n super(TransformerDecoder, self).build(unused_input_shapes)", "def __init__(self,\n encoder: nn.Module,\n decoder: nn.Module,\n **kwargs):\n super().__init__(encoder=encoder, decoder=decoder, **kwargs)\n\n # send to device\n self.to(self.device)", "def decoder_setup_1():\n decoder = RetinaDecoder(\n # pre-pooling\n {'op': 'avg', 'kernel': (1, 2, 2), 'causal': True},\n # grouped temporal conv stacks:\n [\n {\n 'in': 15, 'out': [45, 45, 15], 'kernel': (2, 1, 1),\n 'stride': 1, 'groups': 15, 'acivation': nn.ReLU,\n 'pool': {'op': 'avg', 'kernel': (2, 2, 2), 'causal': True}\n }\n ],\n # spatial conv layers: {in, out, kernel, stride}\n [\n # {'in': 15, 'out': 64, 'kernel': (1, 3, 3), 'stride': 1}\n ],\n # for each ConvRNN cell:\n [\n\n ],\n # temporal convolution stack(s)\n [\n {\n 'in': 15, 'out': [128, 256, 128], 'kernel': (2, 3, 3),\n 'stride': 1, 'groups': 1, 'acivation': nn.ReLU\n }\n ],\n # ConvTranspose layers: {in, out, kernel, stride}\n [\n {'in': 128, 'out': 64, 'kernel': (3, 3, 3), 'stride': (2, 2, 2)},\n {'in': 64, 'out': 16, 'kernel': (3, 3, 3), 'stride': (1, 2, 2)},\n ],\n # post conv layers\n [\n {'in': 16, 'out': 8, 'kernel': (1, 3, 3), 'stride': 1},\n {'in': 8, 'out': 1, 'kernel': (1, 1, 1), 'stride': 1}\n ],\n )\n return decoder", "def forward(self, inputs):\r\n\r\n assert len(inputs) == self.depth, \\\r\n \"Mismatch between input and Network scales\"\r\n\r\n y = self.rgb_to_features[self.depth - 2](inputs[self.depth - 1])\r\n y = self.layers[self.depth - 2](y)\r\n for x, block, converter in \\\r\n zip(reversed(inputs[1:-1]),\r\n reversed(self.layers[:-1]),\r\n reversed(self.rgb_to_features[:-1])):\r\n input_part = converter(x) # convert the input:\r\n y = torch.cat((input_part, y), dim=1) # concatenate the inputs:\r\n y = block(y) # apply the block\r\n\r\n # calculate the final block:\r\n input_part = self.final_converter(inputs[0])\r\n y = torch.cat((input_part, y), dim=1)\r\n y = self.final_block(y)\r\n\r\n # return calculated y\r\n return y", "def forward(self, x):\n # encode\n encode_block1 = self.conv_encode1(x)\n encode_pool1 = self.conv_maxpool1(encode_block1)\n encode_block2 = self.conv_encode2(encode_pool1)\n encode_pool2 = self.conv_maxpool2(encode_block2)\n encode_block3 = self.conv_encode3(encode_pool2)\n encode_pool3 = self.conv_maxpool3(encode_block3)\n # Bottleneck\n bottleneck1 = self.bottleneck(encode_pool3)\n # Decode\n decode_block3 = crop_and_concat(\n bottleneck1, encode_block3, crop=True)\n cat_layer2 = self.conv_decode3(decode_block3)\n decode_block2 = crop_and_concat(\n cat_layer2, encode_block2, crop=True)\n cat_layer1 = self.conv_decode2(decode_block2)\n decode_block1 = crop_and_concat(\n cat_layer1, encode_block1, crop=True)\n final_layer = self.final_layer(decode_block1)\n return final_layer", "def encode(self, inputs, masks):\n with tf.variable_scope(\"encoder\") as scope_encoder:\n #compute sequence length\n sequence_lengths = tf.reduce_sum(masks, axis = 1) \n #create a forward cell\n fw_cell = tf.contrib.rnn.LSTMCell(self.size)\n\n #pass the cells to bilstm and create the bilstm\n bw_cell = tf.contrib.rnn.LSTMCell(self.size)\n output, final_state = 
tf.nn.bidirectional_dynamic_rnn(fw_cell, \\\n bw_cell, inputs, \\\n sequence_length = sequence_lengths, \\\n dtype = tf.float32, \\\n parallel_iterations = 256)\n output_lstm = tf.concat([output[0], output[1]], axis = -1)\n final_state_lstm = tf.concat([final_state[0], final_state[1]], axis = -1)\n return output_lstm, final_state_lstm", "def call(self, inputs, mask=None):\n graph_signal = inputs[0]\n spatial_attention = inputs[1]\n outputs = []\n for time_step in range(graph_signal.shape[3]):\n # shape is (batch_size, V, F)\n graph_signal_tmp = graph_signal[:, :, :, time_step]\n output = K.zeros(shape=(self.num_of_vertices, self.num_of_filters))\n for k in range(self.K):\n # shape of T_k is (V, V)\n T_k = self.cheb_polynomials[k]\n\n # shape of T_k_with_at is (batch_size, V, V)\n T_k_with_at = T_k * spatial_attention\n\n # shape of theta_k is (F, num_of_filters)\n theta_k = self.theta[k]\n\n # shape is (batch_size, V, F)\n rhs = K.batch_dot(K.permute_dimensions(T_k_with_at, (0, 2, 1)), graph_signal_tmp)\n\n output = output + K.dot(rhs, theta_k)\n # 最后增加一维\n outputs.append(K.expand_dims(output, axis=-1))\n\n tmp = Concatenate(axis=-1)([*outputs]) if len(outputs) > 1 else outputs[0]\n\n return K.relu(tmp)", "def forward(self, input):\n return self.layers(input)", "def _decode_train(self, decoder, _encoder_output, _features, labels):\r\n target_embedded = tf.nn.embedding_lookup(decoder.target_embedding,\r\n labels[\"target_ids\"])\r\n\r\n return decoder(_encoder_output, labels=target_embedded[:,:-1], sequence_length=labels[\"target_len\"]-1)", "def call(self, x, training, mask):\n seq_len = tf.shape(x)[1]\n # adding embedding and position encoding.\n embedding = self.embedding(x) # (batch_size, input_seq_len, d_model)\n embedding *= tf.math.sqrt(tf.cast(self.dm, tf.float32))\n embedding += self.positional_encoding[:, :seq_len, :]\n\n encoder_out = self.dropout(embedding, training=training)\n\n for i in range(self.N):\n encoder_out = self.blocks[i](encoder_out, training, mask)\n\n return encoder_out", "def forward(self, src, src_mask=None, src_key_padding_mask=None):\n # type: (Tensor, Optional[Tensor], Optional[Tensor]) -> Tensor\n src = self.pre_norm(src)\n\n # Self attention layer\n src2 = src\n src2 = self.self_attn(src2, src2, src2, attn_mask=src_mask,\n key_padding_mask=src_key_padding_mask)\n src2 = src2[0] # no attention weights\n src2 = src2 * self.resweight\n src = src + self.dropout1(src2)\n\n # Pointiwse FF Layer\n if self.factor_ff:\n #src2 = self.fac_linear1(self.dropout(self.activation(self.linear1(src))))\n #src = src + self.dropout2(src2 * self.resweight)\n #src2 = self.linear2(self.dropout(self.activation(self.fac_linear2(src))))\n #src = src + self.dropout2(src2 * self.resweight)\n src2 = self.dropout1(self.fac_linear1(self.activation(self.linear1(src))))\n src2 = self.linear2(self.dropout(self.activation(self.fac_linear2(src2))))\n src = src + self.dropout2(src2 * self.resweight)\n else:\n src2 = src \n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src2 = src2 * self.resweight\n src = src + self.dropout2(src2)\n\n if self.adapter_finetune:\n src2 = src \n src2 = self.ada_linear2(self.ada_dropout1(self.activation(self.ada_linear1(src2))))\n src2 = src2 * self.resweight\n src = src + self.ada_dropout2(src2)\n \n return src", "def forward(self, encoder_h, decoder_h, lengths):\n \n # Attention\n output = torch.matmul(torch.matmul(decoder_h.unsqueeze(1), self.W), encoder_h.unsqueeze(1).transpose(2, 3))\n output = output + torch.matmul(self.u, 
encoder_h.transpose(1,2)).unsqueeze(2)\n output = output + torch.matmul(self.v, decoder_h.transpose(1,2)).unsqueeze(2)\n output = output + self.b.unsqueeze(1).unsqueeze(2)\n \n return output", "def predict_from(self, inputs, to_layers):", "def call(self, inputs, training=None, mask=None):\n inputs = tf.nn.relu(self.conv(inputs))\n return inputs", "def forward(self, *inputs):\n raise NotImplementedError", "def _process_decoder_input(self,target_data, tgt_sos_id, batch_size):\n x = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])\n y = tf.concat([tf.fill([batch_size, 1], tgt_sos_id), x], 1)\n\n for item in y:\n item.remove(3)\n return y", "def forward(self, input_ids, attention_mask=None, decoder_input_ids=None, lm_labels=None):\n return self.model(\n input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, lm_labels=lm_labels,\n )", "def __init__(self,\n size: int = 0,\n ff_size: int = 0,\n num_heads: int = 0,\n dropout: float = 0.1,\n src_trg_att: bool = True):\n super(TransformerDecoderLayer, self).__init__()\n self.size = size\n self.src_trg_att = src_trg_att\n self.trg_trg_att = MultiHeadedAttention(num_heads, size,\n dropout=dropout)\n if src_trg_att:\n self.src_trg_att = MultiHeadedAttention(num_heads, size,\n dropout=dropout)\n\n self.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size)\n\n self.x_layer_norm = nn.LayerNorm(size, eps=1e-6)\n self.dec_layer_norm = nn.LayerNorm(size, eps=1e-6)\n\n self.dropout = nn.Dropout(dropout)", "def run(layers):", "def decode(self,\n decoder_input,\n encoder_output,\n encoder_decoder_attention_bias,\n decoder_self_attention_bias,\n hparams,\n cache=None,\n nonpadding=None,\n losses=None):\n del losses\n # TODO(dehghani): enable caching.\n del cache\n\n decoder_input = tf.nn.dropout(decoder_input,\n 1.0 - hparams.layer_prepostprocess_dropout)\n\n # No caching in Universal Transformers!\n (decoder_output, dec_extra_output) = (\n my_universal_transformer_util.universal_transformer_decoder(\n decoder_input,\n encoder_output,\n decoder_self_attention_bias,\n encoder_decoder_attention_bias,\n hparams,\n nonpadding=nonpadding,\n save_weights_to=self.attention_weights))\n\n # Expand since t2t expects 4d tensors.\n return tf.expand_dims(decoder_output, axis=2), dec_extra_output", "def forward(self, x, mask, query_embed, pos_embed):\n _, bs, _ = x.shape\n query_embed = query_embed.unsqueeze(1).repeat(\n 1, bs, 1) # [num_query, embed_dims] -> [num_query, bs, embed_dims]\n\n enc_mem = self.encoder(\n query=x,\n key=None,\n value=None,\n query_pos=pos_embed,\n query_key_padding_mask=mask)\n target = torch.zeros_like(query_embed)\n # out_dec: [num_dec_layers, num_query, bs, embed_dims]\n out_dec = self.decoder(\n query=target,\n key=enc_mem,\n value=enc_mem,\n key_pos=pos_embed,\n query_pos=query_embed,\n key_padding_mask=mask)\n out_dec = out_dec.transpose(1, 2)\n return out_dec, enc_mem", "def forward(self, x: Tensor) -> Tensor:\n x1 = x[:, 0]\n x2 = x[:, 1]\n features1, features2 = self.encoder(x1), self.encoder(x2)\n features = [\n torch.cat([features2[i], features1[i]], dim=1)\n for i in range(1, len(features1))\n ]\n features.insert(0, features2[0])\n decoder_output = self.decoder(*features)\n masks: Tensor = self.segmentation_head(decoder_output)\n return masks", "def forward(self, inputs, inputs1):\n\n down1, indices_1, unpool_shape1 = self.layer_1(inputs=inputs,\n layer_size=2)\n down2, indices_2, unpool_shape2 = self.layer_2(inputs=down1,\n layer_size=2)\n down3, indices_3, unpool_shape3 = 
self.layer_3(inputs=down2,\n layer_size=3)\n down4, indices_4, unpool_shape4 = self.layer_4(inputs=down3,\n layer_size=3)\n down5, indices_5, unpool_shape5 = self.layer_5(inputs=down4,\n layer_size=3)\n\n up5 = self.layer_6(inputs=down5, indices=indices_5,\n output_shape=unpool_shape5, layer_size=3)\n up4 = self.layer_7(inputs=up5, indices=indices_4,\n output_shape=unpool_shape4, layer_size=3)\n up3 = self.layer_8(inputs=up4, indices=indices_3,\n output_shape=unpool_shape3, layer_size=3)\n up2 = self.layer_9(inputs=up3, indices=indices_2,\n output_shape=unpool_shape2, layer_size=2)\n output = self.layer_10(inputs=up2, indices=indices_1,\n output_shape=unpool_shape1, layer_size=2)\n\n # Second Modality\n\n down11, indices_11, unpool_shape11 = self.layer_11(inputs=inputs,\n layer_size=2)\n down12, indices_12, unpool_shape12 = self.layer_12(inputs=down1,\n layer_size=2)\n down13, indices_13, unpool_shape13 = self.layer_13(inputs=down2,\n layer_size=3)\n down14, indices_14, unpool_shape14 = self.layer_14(inputs=down3,\n layer_size=3)\n down15, indices_15, unpool_shape15 = self.layer_15(inputs=down4,\n layer_size=3)\n\n up15 = self.layer_16(inputs=down15, indices=indices_15,\n output_shape=unpool_shape15, layer_size=3)\n up14 = self.layer_17(inputs=up15, indices=indices_14,\n output_shape=unpool_shape4, layer_size=3)\n up13 = self.layer_18(inputs=up14, indices=indices_13,\n output_shape=unpool_shape13, layer_size=3)\n up12 = self.layer_19(inputs=up13, indices=indices_12,\n output_shape=unpool_shape12, layer_size=2)\n output1 = self.layer_110(inputs=up12, indices=indices_11,\n output_shape=unpool_shape11, layer_size=2)\n\n # End Pipe\n\n Concat = torch.cat((output, output1), 1)\n\n finalout = self.layer_1110(Concat)\n\n return finalout", "def __init__(self, num_mels=80, num_freq=513, prenet_hidden_size=512, decoder_hidden_size=512,\n attention_dropout=0.1,\n layer_postprocess_dropout=0.1, prenet_activation_fn=None, conv_layers_num=4,\n mag_conv_layers_num=4, prenet_layers=2,\n prenet_dropout=0.5,\n prenet_use_inference_dropout=False,\n cnn_dropout_prob=0.1,\n bn_momentum=0.95,\n bn_epsilon=-1e8,\n reduction_factor=2,\n attention_layers=4,\n self_attention_conv_params=None,\n attention_heads=1,\n attention_cnn_dropout_prob=0.5,\n window_size=4,\n back_step_size=0, kernel_size=5, regularizer=None,\n force_layers=None, dtype=tf.float32, name=\"centaur_decoder\", is_prediction=False, is_training=False,\n is_validation=False):\n self.kernel_size = kernel_size\n\n if force_layers is None:\n force_layers = [1, 3]\n self.is_validation = is_validation\n self.is_prediction = is_prediction\n self.name = name\n self.is_training = is_training\n self.prenet = None\n self.linear_projection = None\n self.attentions = []\n self.output_normalization = None\n self.conv_layers = []\n self.mag_conv_layers = []\n self.conv_layers_num = conv_layers_num\n self.mag_conv_layers_num = mag_conv_layers_num\n self.stop_token_projection_layer = None\n self.mel_projection_layer = None\n self.mag_projection_layer = None\n self.regularizer = regularizer\n self.num_mels = num_mels\n self.num_freq = num_freq\n self.reduction_factor = reduction_factor\n self.prenet_layers = prenet_layers\n self.prenet_hidden_size = prenet_hidden_size\n self.prenet_activation_fn = prenet_activation_fn if prenet_activation_fn else tf.nn.relu\n self.prenet_use_inference_dropout = prenet_use_inference_dropout\n self.prenet_dropout = prenet_dropout\n self.cnn_dropout_prob = cnn_dropout_prob\n self.dtype = dtype\n self.bn_momentum = bn_momentum\n 
self.bn_epsilon = bn_epsilon\n self.decoder_hidden_size = decoder_hidden_size\n self.attention_layers = attention_layers\n self.force_layers = force_layers\n\n self.window_size = window_size\n self.attention_heads = attention_heads\n self.attention_dropout = attention_dropout\n self.layer_postprocess_dropout = layer_postprocess_dropout\n self.attention_cnn_dropout_prob = attention_cnn_dropout_prob\n self.back_step_size = back_step_size\n if self_attention_conv_params is None:\n self_attention_conv_params = {\n \"kernel_size\": [self.kernel_size],\n \"stride\": [1],\n \"num_channels\": self.decoder_hidden_size,\n \"padding\": \"VALID\",\n \"is_causal\": True,\n \"activation_fn\": tf.nn.relu\n }\n self.self_attention_conv_params = self_attention_conv_params", "def decode(self):\n for layer in self.layers:\n layer.decode()", "def decode(self):\n for layer in self.layers:\n layer.decode()", "def forward(self, src, src_mask):\n\n # src = [batch size, src len, hid dim]\n # src_mask = [batch size, src len]\n\n # self attention\n _src, _ = self.self_attention(src, src, src, src_mask)\n\n # dropout, residual connection and layer norm\n src = self.self_attn_layer_norm(src + self.dropout(_src))\n\n # src = [batch size, src len, hid dim]\n\n # positionwise feedforward\n _src = self.positionwise_feedforward(src)\n\n # dropout, residual and layer norm\n src = self.ff_layer_norm(src + self.dropout(_src))\n\n # src = [batch size, src len, hid dim]\n\n return src", "def attention_decoder(decoder_inputs,\n attention_states,\n cell,\n output_size=None,\n dtype=None,\n scope=None):\n if not decoder_inputs:\n raise ValueError(\"Must provide at least 1 input to attention decoder.\")\n if output_size is None:\n output_size = cell.output_size\n \n # ==================================scope=================================================\n with variable_scope.variable_scope(scope or \"TemporalAttn\", dtype=dtype) as scope:\n \n dtype = scope.dtype\n batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping.\n attn_length = attention_states.get_shape()[1].value\n attn_size = attention_states.get_shape()[2].value\n \n # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.\n hidden = array_ops.reshape(attention_states, [-1, attn_length, 1, attn_size])\n # U_d * h_i for i in range(T) (filter)\n u = variable_scope.get_variable(\"AttnDecoderU\", [1, 1, attn_size, attn_size], dtype=dtype)\n hidden_features = nn_ops.conv2d(hidden, u, [1, 1, 1, 1], \"SAME\")\n \n v = variable_scope.get_variable(\"AttnDecoderV\", [attn_size], dtype=dtype)\n \n # how to get the initial_state\n initial_state_size = array_ops.stack([batch_size, cell.output_size])\n initial_state = [array_ops.zeros(initial_state_size, dtype=dtype) for _ in xrange(2)]\n state = initial_state\n \n w = variable_scope.get_variable(\"AttnDecoderW\", [2*cell.output_size, attn_size], dtype=dtype)\n b = variable_scope.get_variable(\"AttnDecoderb\", [attn_size], dtype=dtype)\n \n # beta_scalar = variable_scope.get_variable(\"BetaScalar\", [attn_length])\n \n def attention(query, step):\n \"\"\"\n Put attention masks on hidden using hidden_features and query.\n \"\"\"\n \n if nest.is_sequence(query): # If the query is a tuple, flatten it.\n query_list = nest.flatten(query)\n query = array_ops.concat(query_list, 1)\n _tmp = math_ops.matmul(query, w) + b\n _tmp = array_ops.reshape(_tmp, [-1, 1, 1, attn_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = math_ops.reduce_sum(v * math_ops.tanh(hidden_features + _tmp), [2, 
3])\n # beta = math_ops.multiply(nn_ops.softmax(s, name=\"beta_%d\" % step), beta_scalar)\n beta = nn_ops.softmax(s, name=\"beta_%d\" % step)\n # Now calculate the attention-weighted vector d.\n \n hidden_attn = math_ops.reduce_sum(array_ops.reshape(beta, [-1, attn_length, 1, 1]) * hidden,\n [1, 2])\n return hidden_attn, beta\n\n outputs = []\n attns = []\n with variable_scope.variable_scope(\"Attn\"):\n h_t, attn_t = attention(state, 0)\n attns.append(attn_t)\n # =============================recurrent===========================\n for i, inp in enumerate(decoder_inputs):\n if i > 0:\n variable_scope.get_variable_scope().reuse_variables()\n \n # LSTM_d([\\tilde{\\mathbf{h}}_{t}; \\mathbf{y}_t], \\hat{\\mathbf{y}}_{t}, \\mathbf{s}^d_{t})\n with variable_scope.variable_scope(\"DecoderOutput\"):\n x = tf.concat([inp, h_t], 1)\n cell_output, state = cell(x, state)\n outputs.append(cell_output)\n\n with variable_scope.variable_scope(\"Attn\"):\n h_t, attn_t = attention(state, i+1)\n attns.append(attn_t)\n \n with variable_scope.variable_scope(\"AttnDecoderOutput\"):\n inputs = tf.concat([cell_output, h_t], 1)\n output = Linear(inputs, output_size, True)(inputs)\n outputs.append(output)\n \n return outputs, state, attns", "def _decode_train(self):\n\n # the basic idea is, we use golden sketch during train and in order to copy from source\n # we given true mask of decoder to generate right copy weights\n state = {'encoder': self.concated_encoder_output}\n\n def transformer_concated_decoder_internal(inputs, memory, bias, mem_bias, params, state=None, scope=None,\n reuse=False):\n return transformer_decoder(inputs, memory, bias, mem_bias, params, state, scope, reuse)\n\n self.final_logits = self._decode_func(\n self.tgt_seq, self.tgt_len, self.target_embeddings, self.decoder_weights,\n self.final_enc_attn_bias, 'train', state, self.vocab_size, use_copy=True,\n expand_source_ids_oo=self.concat_src_ids_oo,\n max_out_oovs=self.max_out_oovs, src_mask=self.concat_src_mask,\n decoder_fn=transformer_concated_decoder_internal,\n scope='final_decoder')", "def forward(self, inputs, memories, mask):\n memory = self.get_go_frame(inputs).unsqueeze(0)\n memories = self._reshape_memory(memories)\n memories = torch.cat((memory, memories), dim=0)\n memories = self._update_memory(memories)\n memories = self.prenet(memories)\n\n self._init_states(inputs, mask=mask)\n self.attention.init_states(inputs)\n\n outputs, stop_tokens, alignments = [], [], []\n while len(outputs) < memories.size(0) - 1:\n memory = memories[len(outputs)]\n decoder_output, attention_weights, stop_token = self.decode(memory)\n outputs += [decoder_output.squeeze(1)]\n stop_tokens += [stop_token.squeeze(1)]\n alignments += [attention_weights]\n\n outputs, stop_tokens, alignments = self._parse_outputs(\n outputs, stop_tokens, alignments)\n return outputs, alignments, stop_tokens", "def __call__(self, **kwargs):\n depthwise_decoder = kwargs.get('depthwise_decoder', True)\n\n base = tf.keras.applications.MobileNetV2(\n input_shape=self._input_shape,\n include_top=False,\n weights='imagenet'\n )\n\n if not self._trainable:\n base.trainable = False\n\n base_out = base.get_layer('block_16_project')\n\n # encoder skip connections\n # block 13\n skip_b13 = base.get_layer('block_13_expand_relu')\n s13_filters = tf.keras.backend.int_shape(skip_b13.output)[-1]\n\n # block 6\n skip_b6 = base.get_layer('block_6_expand_relu')\n s6_filters = tf.keras.backend.int_shape(skip_b6.output)[-1]\n\n # block 3\n skip_b3 = base.get_layer('block_3_expand_relu')\n s3_filters 
= tf.keras.backend.int_shape(skip_b3.output)[-1]\n\n # block 1\n skip_b1 = base.get_layer('block_1_expand_relu')\n s1_filters = tf.keras.backend.int_shape(skip_b1.output)[-1]\n\n if depthwise_decoder:\n # bridge first\n x = self._residual_block(\n base_out.output,\n n_filters=s13_filters,\n kernel_size=3, strides=1\n )\n\n x = self._residual_block(\n x,\n n_filters=s13_filters,\n kernel_size=3,\n strides=1\n )\n\n # and start going up\n x = tf.keras.layers.UpSampling2D((2, 2))(x)\n x = tf.keras.layers.Concatenate()([x, skip_b13.output])\n\n x = self._residual_block(\n x,\n n_filters=s6_filters,\n kernel_size=3,\n strides=1\n )\n\n else:\n x = self._upconv(\n base_out.output,\n n_filters=s6_filters,\n kernel_size=3,\n strides=2\n )\n\n x = tf.keras.layers.Concatenate()([x, skip_b13.output])\n\n # upsample and concat with block 6\n if depthwise_decoder:\n x = tf.keras.layers.UpSampling2D((2, 2))(x)\n x = tf.keras.layers.Concatenate()([x, skip_b6.output])\n\n x = self._residual_block(\n x,\n n_filters=s3_filters,\n kernel_size=3,\n strides=1\n )\n\n else:\n x = self._upconv(\n x,\n n_filters=s3_filters,\n kernel_size=3,\n strides=2\n )\n\n x = tf.keras.layers.Concatenate()([x, skip_b6.output])\n\n # upsample and concat with block 3\n if depthwise_decoder:\n x = tf.keras.layers.UpSampling2D((2, 2))(x)\n x = tf.keras.layers.Concatenate()([x, skip_b3.output])\n\n x = self._residual_block(\n x,\n n_filters=s1_filters,\n kernel_size=3,\n strides=1\n )\n\n else:\n x = self._upconv(\n x,\n n_filters=s1_filters,\n kernel_size=3,\n strides=2\n )\n\n x = tf.keras.layers.Concatenate()([x, skip_b3.output])\n\n # upsample and concat with block 1\n if depthwise_decoder:\n x = tf.keras.layers.UpSampling2D((2, 2))(x)\n x = tf.keras.layers.Concatenate()([x, skip_b1.output])\n\n x = self._residual_block(\n x,\n n_filters=64,\n kernel_size=3,\n strides=1\n )\n\n else:\n x = self._upconv(\n x,\n n_filters=64,\n kernel_size=3,\n strides=2\n )\n\n x = tf.keras.layers.Concatenate()([x, skip_b1.output])\n\n if self._mode == 'binary':\n x = tf.keras.layers.UpSampling2D((2, 2))(x)\n\n x = tf.keras.layers.SeparableConv2D(\n 1,\n kernel_size=3,\n strides=1,\n padding='same'\n )(x)\n\n out = tf.keras.activations.sigmoid(x)\n\n else:\n x = tf.keras.layers.UpSampling2D((2, 2))(x)\n\n x = tf.keras.layers.SeparableConv2D(\n self._n_classes,\n kernel_size=3,\n strides=1,\n padding='same'\n )(x)\n\n out = tf.keras.activations.softmax(x)\n\n return tf.keras.models.Model(inputs=base.input, outputs=out)", "def forward(self,\n x: Tensor = None,\n memory: Tensor = None,\n src_mask: Tensor = None,\n trg_mask: Tensor = None) -> Tensor:\n # decoder/target self-attention\n\n x_norm = self.x_layer_norm(x)\n# print(x_norm)\n# trg_mask()\n# print(trg_mask)\n\n h1 = self.trg_trg_att(x_norm, x_norm, x_norm, mask=trg_mask)\n h1 = self.dropout(h1) + x\n\n\n if self.src_trg_att:\n # source-target attention\n h1_norm = self.dec_layer_norm(h1)\n h2 = self.src_trg_att(memory, memory, h1_norm, mask=src_mask)\n # final position-wise feed-forward layer\n o = self.feed_forward(self.dropout(h2) + h1)\n else:\n o = self.feed_forward(h1)\n return o", "def forward(self, inputs):\n raise NotImplementedError", "def _encode(self, input_dict):\n\n source_sequence, src_length = input_dict['source_tensors']\n\n training = (self._mode == \"train\")\n dropout_keep_prob = self.params['dropout_keep_prob'] if training else 1.0\n regularizer = self.params.get('regularizer', None)\n data_format = self.params.get('data_format', 'channels_last')\n bn_momentum = 
self.params.get('bn_momentum', 0.99)\n bn_epsilon = self.params.get('bn_epsilon', 1e-3)\n\n input_layer = tf.expand_dims(source_sequence, axis=-1) # BTFC\n # print(\"<<< input :\", input_layer.get_shape().as_list())\n\n batch_size = input_layer.get_shape().as_list()[0]\n freq = input_layer.get_shape().as_list()[2]\n\n # supported data_formats:\n # BTFC = channel_last (legacy)\n # BCTF = channel_first(legacy)\n # BFTC\n # BCFT\n\n if data_format=='channels_last' or data_format=='BTFC':\n layout = 'BTFC'\n dformat = 'channels_last'\n elif data_format=='channels_first' or data_format=='BCTF':\n layout = 'BCTF'\n dformat = 'channels_first'\n elif data_format=='BFTC':\n layout = 'BFTC'\n dformat = 'channels_last'\n elif data_format=='BCFT':\n layout = 'BCFT'\n dformat = 'channels_first'\n else:\n print(\"WARNING: unsupported data format: will use channels_last (BTFC) instead\")\n layout = 'BTFC'\n dformat = 'channels_last'\n\n #input_layer is BTFC\n\n if layout == 'BCTF':\n top_layer = tf.transpose(input_layer, [0, 3, 1, 2])\n elif layout == 'BFTC':\n top_layer = tf.transpose(input_layer, [0, 2, 1, 3])\n elif layout == 'BCFT':\n top_layer = tf.transpose(input_layer, [0, 3, 2, 1])\n else:\n top_layer = input_layer\n\n # print(\"<<< pre-conv:\", top_layer.get_shape().as_list())\n\n # ----- Convolutional layers ---------------------------------------------\n conv_layers = self.params['conv_layers']\n\n for idx_conv in range(len(conv_layers)):\n ch_out = conv_layers[idx_conv]['num_channels']\n kernel_size = conv_layers[idx_conv]['kernel_size'] # [T,F] format\n strides = conv_layers[idx_conv]['stride'] # [T,F] format\n padding = conv_layers[idx_conv]['padding']\n\n if padding == \"VALID\":\n src_length = (src_length - kernel_size[0] + strides[0]) // strides[0]\n freq = (freq - kernel_size[1] + strides[1]) // strides[1]\n else:\n src_length = (src_length + strides[0] - 1) // strides[0]\n freq = (freq + strides[1] -1) // strides[1]\n\n if layout == 'BFTC' or layout == 'BCFT':\n kernel_size = kernel_size[::-1]\n strides = strides[::-1]\n # print(kernel_size, strides)\n\n top_layer = conv_bn_actv(\n layer_type=\"conv2d\",\n name=\"conv{}\".format(idx_conv + 1),\n inputs=top_layer,\n filters=ch_out,\n kernel_size=kernel_size,\n activation_fn=self.params['activation_fn'],\n strides=strides,\n padding=padding,\n regularizer=regularizer,\n training=training,\n data_format=dformat,\n bn_momentum=bn_momentum,\n bn_epsilon=bn_epsilon,\n )\n # print(idx_conv, \"++++\", top_layer.get_shape().as_list())\n\n # convert layout --> BTFC\n # if data_format == 'channels_first':\n # top_layer = tf.transpose(top_layer, [0, 2, 3, 1])\n\n if layout == 'BCTF': # BCTF --> BTFC\n top_layer = tf.transpose(top_layer, [0, 2, 3, 1])\n elif layout == 'BFTC': # BFTC --> BTFC\n top_layer = tf.transpose(top_layer, [0, 2, 1, 3])\n elif layout == 'BCFT': # BCFT --> BTFC\n top_layer = tf.transpose(top_layer, [0, 3, 2, 1])\n\n\n # print(\">>> post-conv:\", top_layer.get_shape().as_list())\n\n # reshape to [B, T, FxC]\n f = top_layer.get_shape().as_list()[2]\n c = top_layer.get_shape().as_list()[3]\n fc = f * c\n top_layer = tf.reshape(top_layer, [batch_size, -1, fc])\n\n # ----- RNN ---------------------------------------------------------------\n num_rnn_layers = self.params['num_rnn_layers']\n if num_rnn_layers > 0:\n rnn_cell_dim = self.params['rnn_cell_dim']\n rnn_type = self.params['rnn_type']\n if self.params['use_cudnn_rnn']:\n # reshape to [B, T, C] --> [T, B, C]\n rnn_input = tf.transpose(top_layer, [1, 0, 2])\n if 
self.params['rnn_unidirectional']:\n direction = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION\n else:\n direction = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION\n\n if rnn_type == \"cudnn_gru\" or rnn_type == \"gru\":\n # pylint: disable=no-member\n rnn_block = tf.contrib.cudnn_rnn.CudnnGRU(\n num_layers=num_rnn_layers,\n num_units=rnn_cell_dim,\n direction=direction,\n dropout=1.0 - dropout_keep_prob,\n dtype=rnn_input.dtype,\n name=\"cudnn_gru\",\n )\n elif rnn_type == \"cudnn_lstm\" or rnn_type == \"lstm\":\n # pylint: disable=no-member\n rnn_block = tf.contrib.cudnn_rnn.CudnnLSTM(\n num_layers=num_rnn_layers,\n num_units=rnn_cell_dim,\n direction=direction,\n dropout=1.0 - dropout_keep_prob,\n dtype=rnn_input.dtype,\n name=\"cudnn_lstm\",\n )\n else:\n raise ValueError(\n \"{} is not a valid rnn_type for cudnn_rnn layers\".format(\n rnn_type)\n )\n top_layer, state = rnn_block(rnn_input)\n top_layer = tf.transpose(top_layer, [1, 0, 2])\n else:\n rnn_input = top_layer\n multirnn_cell_fw = tf.nn.rnn_cell.MultiRNNCell(\n [rnn_cell(rnn_cell_dim=rnn_cell_dim, layer_type=rnn_type,\n dropout_keep_prob=dropout_keep_prob)\n for _ in range(num_rnn_layers)]\n )\n if self.params['rnn_unidirectional']:\n top_layer, state = tf.nn.dynamic_rnn(\n cell=multirnn_cell_fw,\n inputs=rnn_input,\n sequence_length=src_length,\n dtype=rnn_input.dtype,\n time_major=False,\n )\n else:\n multirnn_cell_bw = tf.nn.rnn_cell.MultiRNNCell(\n [rnn_cell(rnn_cell_dim=rnn_cell_dim, layer_type=rnn_type,\n dropout_keep_prob=dropout_keep_prob)\n for _ in range(num_rnn_layers)]\n )\n top_layer, state = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=multirnn_cell_fw, cell_bw=multirnn_cell_bw,\n inputs=rnn_input,\n sequence_length=src_length,\n dtype=rnn_input.dtype,\n time_major=False\n )\n # concat 2 tensors [B, T, n_cell_dim] --> [B, T, 2*n_cell_dim]\n top_layer = tf.concat(top_layer, 2)\n # -- end of rnn------------------------------------------------------------\n\n if self.params['row_conv']:\n channels = top_layer.get_shape().as_list()[-1]\n top_layer = row_conv(\n name=\"row_conv\",\n input_layer=top_layer,\n batch=batch_size,\n channels=channels,\n activation_fn=self.params['activation_fn'],\n width=self.params['row_conv_width'],\n regularizer=regularizer,\n training=training,\n data_format=data_format,\n bn_momentum=bn_momentum,\n bn_epsilon=bn_epsilon,\n )\n\n # Reshape [B, T, C] --> [B*T, C]\n c = top_layer.get_shape().as_list()[-1]\n top_layer = tf.reshape(top_layer, [-1, c])\n\n # --- hidden layer with clipped ReLU activation and dropout---------------\n top_layer = tf.layers.dense(\n inputs=top_layer,\n units=self.params['n_hidden'],\n kernel_regularizer=regularizer,\n activation=self.params['activation_fn'],\n name='fully_connected',\n )\n outputs = tf.nn.dropout(x=top_layer, keep_prob=dropout_keep_prob)\n\n # reshape from [B*T,A] --> [B, T, A].\n # Output shape: [batch_size, n_steps, n_hidden]\n outputs = tf.reshape(\n outputs,\n [batch_size, -1, self.params['n_hidden']],\n )\n\n return {\n 'outputs': outputs,\n 'src_length': src_length,\n }", "def decoder_block(layer_in, skip_in, n_filters, dropout=True):\n\n # weight initialization\n init = RandomNormal(stddev=0.02)\n # add upsampling layer\n g = Conv2DTranspose(n_filters, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(layer_in)\n # add batch normalization\n g = BatchNormalization()(g, training=True)\n # conditionally add dropout\n if dropout:\n g = Dropout(0.5)(g, training=True)\n # merge with skip connection\n g = Concatenate()([g, skip_in])\n # relu 
activation\n g = Activation('relu')(g)\n\n return g", "def forward(self, trg_embed, encoder_hidden, encoder_final, \n src_mask, trg_mask, hidden=None, max_len=None):\n \n # the maximum number of steps to unroll the RNN\n #print(\"czw src mask\", src_mask.size())\n #print(\"czw trg embed\", trg_embed.size())\n #print(\"czw encoder_hidden\", encoder_hidden.size())\n #print(\"czw encoder_final\", encoder_final[0].size())\n if max_len is None:\n max_len = trg_embed.size(1)\n\n # initialize decoder hidden state\n if hidden is None:\n hidden = self.init_hidden(encoder_final)\n \n # pre-compute projected encoder hidden states\n # (the \"keys\" for the attention mechanism)\n # this is only done for efficiency\n proj_key = self.attention.key_layer(encoder_hidden)\n \n # here we store all intermediate hidden states and pre-output vectors\n decoder_states = []\n pre_output_vectors = []\n \n # unroll the decoder RNN for max_len steps\n for i in range(max_len):\n prev_embed = trg_embed[:, i].unsqueeze(1)\n output, hidden, pre_output = self.forward_step(\n prev_embed, encoder_hidden, src_mask, proj_key, hidden)\n decoder_states.append(output)\n pre_output_vectors.append(pre_output)\n\n decoder_states = torch.cat(decoder_states, dim=1)\n pre_output_vectors = torch.cat(pre_output_vectors, dim=1)\n return decoder_states, hidden, pre_output_vectors # [B, N, D]", "def forward(self, inputs, states):\n out, new_states = self.core(inputs, states)\n # for decoder seq_length = forecast horizon\n h = out[:, :, :]\n h = self.fc1(h)\n # Here output_feature_size chosen as hidden size\n h = self.dropout(h)\n h = F.leaky_relu(h, negative_slope=self.relu_leak)\n output = self.fc2(h)\n # output = torch.stack([fc(h).squeeze(dim=2) for fc in self.fc2], dim=2)\n return output, new_states" ]
[ "0.72808146", "0.70303583", "0.6751413", "0.6578204", "0.65489453", "0.6506799", "0.6493319", "0.6480627", "0.6457797", "0.643704", "0.6367421", "0.6358158", "0.6303986", "0.62878895", "0.6264638", "0.6258904", "0.62526155", "0.62208277", "0.6188875", "0.6128146", "0.6125585", "0.61127716", "0.6081616", "0.60756147", "0.606569", "0.60243577", "0.6005663", "0.59780306", "0.59761083", "0.5967339", "0.59666646", "0.5934867", "0.59256196", "0.59125364", "0.5904166", "0.59038574", "0.5873545", "0.5871699", "0.5871671", "0.58640075", "0.5839157", "0.5832885", "0.57977575", "0.5791469", "0.57851934", "0.5778563", "0.57663244", "0.5763172", "0.57626283", "0.5760511", "0.57517433", "0.57400215", "0.57388365", "0.57351345", "0.57265484", "0.5708028", "0.569486", "0.5691362", "0.5690375", "0.56884587", "0.5687111", "0.56843543", "0.5674987", "0.5666524", "0.5660267", "0.5655604", "0.5654025", "0.56487316", "0.564438", "0.56409585", "0.56347483", "0.5633835", "0.5627745", "0.5618256", "0.5613074", "0.56046957", "0.5603881", "0.55946076", "0.5592567", "0.55853045", "0.5569374", "0.5569096", "0.55644196", "0.5564348", "0.5548905", "0.554723", "0.554466", "0.5543769", "0.5540338", "0.5540338", "0.5538604", "0.55359185", "0.55355823", "0.5524923", "0.5522734", "0.55162024", "0.55129", "0.55064845", "0.55041033", "0.5501983", "0.55003285" ]
0.0
-1
Calculating the psi operator for the transport and production of enstrophy
def psi_enstrophy(
        Tau,                    # SGS; (6,64,64,64)
        h    = False,           # spatial step size
        flag = True):           # spectral flag; default is gradient tool
    #---------------------------------------------------------------------#
    # Default variables                                                   #
    #---------------------------------------------------------------------#
    if h is False:
        Pi = np.pi
        N  = 64
        h  = (2.0*Pi)/N
    #---------------------------------------------------------------------#
    # Preallocation variables                                             #
    #---------------------------------------------------------------------#
    dim = np.shape(Tau)[1]
    Psi = np.zeros((9, dim, dim, dim))
    #---------------------------------------------------------------------#
    # Calculating psi using spectral methods                              #
    #---------------------------------------------------------------------#
    if flag is False:
        kspec  = np.fft.fftfreq(dim) * dim
        Kfield = np.array(np.meshgrid(kspec, kspec, kspec, indexing='ij'))
        #-----------------------------------------------------------------#
        # Psi_{11}                                                        #
        #-----------------------------------------------------------------#
        Psi[0] = np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[2])).real -\
                 np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[1])).real
        #-----------------------------------------------------------------#
        # Psi_{12}                                                        #
        #-----------------------------------------------------------------#
        Psi[1] = np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[4])).real -\
                 np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[3])).real
        #-----------------------------------------------------------------#
        # Psi_{13}                                                        #
        #-----------------------------------------------------------------#
        Psi[2] = np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[5])).real -\
                 np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[4])).real
        #-----------------------------------------------------------------#
        # Psi_{21}                                                        #
        #-----------------------------------------------------------------#
        Psi[3] = np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[0])).real -\
                 np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[2])).real
        #-----------------------------------------------------------------#
        # Psi_{22}                                                        #
        #-----------------------------------------------------------------#
        Psi[4] = np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[1])).real -\
                 np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[4])).real
        #-----------------------------------------------------------------#
        # Psi_{23}                                                        #
        #-----------------------------------------------------------------#
        Psi[5] = np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[2])).real -\
                 np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[5])).real
        #-----------------------------------------------------------------#
        # Psi_{31}                                                        #
        #-----------------------------------------------------------------#
        Psi[6] = np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[1])).real -\
                 np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[0])).real
        #-----------------------------------------------------------------#
        # Psi_{32}                                                        #
        #-----------------------------------------------------------------#
        Psi[7] = np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[3])).real -\
                 np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[1])).real
        #-----------------------------------------------------------------#
        # Psi_{33}                                                        #
        #-----------------------------------------------------------------#
        Psi[8] = np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[4])).real -\
                 np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[2])).real
    #---------------------------------------------------------------------#
    # Calculating psi using gradient tool                                 #
    #---------------------------------------------------------------------#
    else:
        #-----------------------------------------------------------------#
        # Psi_{11}                                                        #
        #-----------------------------------------------------------------#
        Psi[0] = np.gradient(Tau[2], h, edge_order=2)[1] -\
                 np.gradient(Tau[1], h, edge_order=2)[0]
        #-----------------------------------------------------------------#
        # Psi_{12}                                                        #
        #-----------------------------------------------------------------#
        Psi[1] = np.gradient(Tau[4], h, edge_order=2)[1] -\
                 np.gradient(Tau[3], h, edge_order=2)[0]
        #-----------------------------------------------------------------#
        # Psi_{13}                                                        #
        #-----------------------------------------------------------------#
        Psi[2] = np.gradient(Tau[5], h, edge_order=2)[1] -\
                 np.gradient(Tau[4], h, edge_order=2)[0]
        #-----------------------------------------------------------------#
        # Psi_{21}                                                        #
        #-----------------------------------------------------------------#
        Psi[3] = np.gradient(Tau[0], h, edge_order=2)[0] -\
                 np.gradient(Tau[2], h, edge_order=2)[2]
        #-----------------------------------------------------------------#
        # Psi_{22}                                                        #
        #-----------------------------------------------------------------#
        Psi[4] = np.gradient(Tau[1], h, edge_order=2)[0] -\
                 np.gradient(Tau[4], h, edge_order=2)[2]
        #-----------------------------------------------------------------#
        # Psi_{23}                                                        #
        #-----------------------------------------------------------------#
        Psi[5] = np.gradient(Tau[2], h, edge_order=2)[0] -\
                 np.gradient(Tau[5], h, edge_order=2)[2]
        #-----------------------------------------------------------------#
        # Psi_{31}                                                        #
        #-----------------------------------------------------------------#
        Psi[6] = np.gradient(Tau[1], h, edge_order=2)[2] -\
                 np.gradient(Tau[0], h, edge_order=2)[1]
        #-----------------------------------------------------------------#
        # Psi_{32}                                                        #
        #-----------------------------------------------------------------#
        Psi[7] = np.gradient(Tau[3], h, edge_order=2)[2] -\
                 np.gradient(Tau[1], h, edge_order=2)[1]
        #-----------------------------------------------------------------#
        # Psi_{33}                                                        #
        #-----------------------------------------------------------------#
        Psi[8] = np.gradient(Tau[4], h, edge_order=2)[2] -\
                 np.gradient(Tau[2], h, edge_order=2)[1]

    return Psi
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _phi2psi(self):\n try:\n locq = self.param_q(self.rhotor)\n except:\n self._readeqdsk(self.shot)\n locq = self.param_q(self.rhotor)\n \n locphi = self.rhotor**2\n psi = integrate.cumtrapz(1/locq,locphi)\n psi = np.concatenate([[0], psi])\n psi = psi/max(psi)\n self.param_psi = interpolate.interp1d(self.rhotor, psi) \n \n\n # tmpnum=100000\n # locq = self.param_q(np.linspace(0,1,tmpnum)) #augmenting precision near the core\n # locphi = self.rhotor**2\n # locphi_p = interpolate.interp1d(np.linspace(0,1,len(locphi)),locphi)\n # locphi = locphi_p(np.linspace(0,1,tmpnum))\n # psi = integrate.cumtrapz(1/locq,locphi)\n # psi = np.concatenate([[0], psi])\n # psi = psi/max(psi)\n # rhopsi = psi**0.5\n # self.param_psi = interpolate.interp1d(np.linspace(0,1,tmpnum), rhopsi)", "def exptomo(self, psi):\n return np.exp(1j*psi * self.voxelsize * self.wavenumber())", "def _psi_ ( self ) :\n return psis", "def _phi2psi(self):\n try:\n locq = self.param_q(self.rhotor)\n except:\n self._readeqdsk(self.shot)\n locq = self.param_q(self.rhotor)\n \n locphi = self.rhotor**2\n psi = integrate.cumtrapz(1/locq,locphi)\n psi = np.concatenate([[0], psi])\n psi = psi/max(psi)\n self.param_psi = interpolate.interp1d(self.rhotor, psi)", "def cal_phi(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for phi routine)')\n\n if(self.px>0):\n self.phi=math.atan(self.py/self.px)\n elif(self.px<0):\n self.phi=math.atan(self.py/self.px)+math.pi\n elif(self.py>0): #remind that p(1)=0\n self.phi=math.pi/2.0\n elif(self.py<0): # remind that p(1)=0\n self.phi=-math.pi/2.0\n else:\n print \"Warning self.phi not properly defined put value to 0\"\n self.phi=0\n \n if(self.phi<0):\n self.phi=self.phi+2*math.pi\n\n return self.phi", "def psi_wf(self, vw, d1, d2, ns, tl):\n\t osmotic = (R*299./VW)*np.log((((vw/self.ZW)*self.ZW)/(VW))/((((vw/self.ZW)*self.ZW)/(VW))+ns))/10**6 #MPa\n\t turgor = ((vw/self.ZW) - d1)**d2#MPa\n\t return turgor+osmotic #MPa ", "def get_mfp(self, T):\n\n self.air.T = T\n self.air.set_TempPres_dependents()\n\n self.mfp = (\n (np.sqrt(2.) * np.pi * self.air.d ** 2. * self.air.n) ** -1.\n )\n\n return self.mfp", "def fluxonium_potential(self):\n return -0.5*(self.Ej * ((1+self.d)*cos(self.phis - 2. * pi * self.phi - 2. * pi * self.phiL) + (1-self.d)*cos(self.phis-2. * pi * self.phiL))) + self.El/2. * (self.phis) ** 2\n #return -0.5*(self.Ej * cos(self.phis - 2. * pi * self.phi) + self.Ej * cos(self.phis)) + self.El/2. 
* (self.phis-self.phiL)** 2", "def psi(x, y):\n return x", "def phi_opinion(self, persp):\n f1 = self.nrs[persp]+float(self.beta_o)\n f2 = np.sum(self.nrs[persp], axis=1, keepdims=True)+self.VO*self.beta_o\n return f1/f2", "def _phi2psi(self):\n try:\n self.param_q.mean()\n except:\n self._readeqdsk()\n tmpnum=100000\n locq = self.param_q(np.linspace(0,1,tmpnum)) #augmenting precision near the core\n locphi = np.linspace(0,1,tmpnum)\n psi = integrate.cumtrapz(1/locq,locphi)\n psi = np.concatenate([[0], psi])\n psi = psi/max(psi)\n rhopsi = psi\n self.param_psi = interpolate.interp1d(np.linspace(0,1,tmpnum), rhopsi)", "def psi(n, x):\n H = h(n, x, orthonormal=True)\n weight = np.exp(-(x ** 2) / 2)\n psi = H * weight\n return psi", "def cal_eta(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for eta routine)')\n \n theta=math.acos(self.pz/math.sqrt(self.px**2+self.py**2+self.pz**2))\n self.eta=-math.log(math.tan(theta/2.0))", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def psi(n,x):\r\n a = 1/(sqrt((2**n)*fac(n)*sqrt(pi)))\r\n b = (e)**(-1*(x**2)*0.5)\r\n H_n = H(n,x)\r\n return a*b*(H_n)", "def _calc_interaction_expansion(self):\n # preevaluate expansions for volume and surface phase functions\n # this returns symbolic code to be then further used\n\n volexp = self.V.legexpansion(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n self.geometry).doit()\n\n brdfexp = self.SRF.legexpansion(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n self.geometry).doit()\n\n # preparation of the product of p*BRDF for coefficient retrieval\n # this is the eq.23. and would need to be integrated from 0 to 2pi\n fPoly = expand(2 * sp.pi * volexp * brdfexp)\n\n # do integration of eq. 23\n expr = self._integrate_0_2pi_phis(fPoly)\n\n # now we do still simplify the expression to be able to express\n # things as power series of cos(theta_s)\n theta_s = sp.Symbol('theta_s')\n replacements = [(sp.sin(theta_s) ** i,\n expand((1. - sp.cos(theta_s) ** 2)\n ** sp.Rational(i, 2)))\n for i in range(1, self.SRF.ncoefs + self.V.ncoefs - 1)\n if i % 2 == 0]\n\n res = expand(expr.xreplace(dict(replacements)))\n\n return res", "def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u", "def F_trans(self):\n common_scale = self.edp_par['common_scale'].value\n R_HM = self.edp_par['R_HM'].value\n X_h = self.edp_par['X_h'].value\n psi = self.edp_par['psi'].value \n arg = self.qz*X_h*np.cos(psi) - self.qx*X_h*np.sin(psi)\n return common_scale * (R_HM*np.cos(arg) - 1)", "def flux_qubit_potential(self):\n return -self.Ej * cos(self.phis - 2. * pi * self.phi) + self.El/2. 
* (self.phis) ** 2", "def addPsiOsmo(self):\n # Salinity is 0 ppt is the basic scenario\n self._psi_osmo = np.array([0] * self.no_plants)", "def psi(x):\n return np.sin(x)", "def u(E_wholesale_P, fixed_P_component, price_elast, xi, q):\n \n end_P = p_endconsumers(E_wholesale_P, fixed_P_component)\n u = xi / (1.0 - 1.0 / price_elast) * q**(1.0 - 1.0 / price_elast) - end_P * q\n \n return u", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def energy_tot(P,F,H,molecule):\n return energy_el(P,F,H) + energy_n(molecule)", "def idealOpAmp():", "def psi(self):\n return PoundSquareInch(self.base_value / 6894.76)", "def psi(arity, degree, convention=convention):\n if degree == 0:\n return SurjectionElement({tuple(range(1, arity + 1)): 1},\n convention=convention)\n else:\n previous = psi(arity, degree - 1, convention=convention)\n acted_on = operators[degree % 2] * previous\n answer = h(acted_on)\n return answer", "def energy(ps):\n return kinetic_energy(ps) + potential_energy(ps)", "def psi_packet_k(pos=x1,angular_f=omega1,time=t1,phi_dic=example_phik()):\n\n phi_var = phi_dic['var']\n phi_par = 1, np.pi, \n phi_y = phi_dic['y']\n\n psi_var = sy.var('x omega t i')\n psi_par = pos, angular_f, time, sy.I\n\n # var = phi_var + psi_var\n # par = phi_par + psi_par\n var = psi_var\n par = psi_var\n\n k = sy.symbols('k')\n y1 = phi_y * sy.exp( i * (k*x - omega*t) )\n\n return y1\n\n # def integrand(k,x,omega,t):\n # return phi(k) * np.exp( 1j * (k*x - omega*t) )\n #\n # I = si.quad(integrand, -np.inf, np.inf, args=(x,omega,t) )\n #\n # return 1/np.sqrt( 2 * np.pi ) * ( I[0] - I[1] )", "def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))", "def phases_from_superoperator(U):\n if U.type=='oper':\n phi_00 = np.rad2deg(np.angle(U[0, 0])) # expected to equal 0 because of our\n # choice for the energy, not because of rotating frame. 
But not guaranteed including the coupling\n phi_01 = np.rad2deg(np.angle(U[1, 1]))\n phi_10 = np.rad2deg(np.angle(U[3, 3]))\n phi_11 = np.rad2deg(np.angle(U[4, 4]))\n phi_02 = np.rad2deg(np.angle(U[2, 2])) # used only for avgatefid_superoperator_phasecorrected\n phi_20 = np.rad2deg(np.angle(U[6, 6])) # used only for avgatefid_superoperator_phasecorrected\n\n elif U.type=='super':\n phi_00 = 0 # we set it to 0 arbitrarily but it is indeed not knowable\n phi_01 = np.rad2deg(np.angle(U[1, 1])) # actually phi_01-phi_00 etc\n phi_10 = np.rad2deg(np.angle(U[3, 3]))\n phi_11 = np.rad2deg(np.angle(U[4, 4]))\n phi_02 = np.rad2deg(np.angle(U[2, 2]))\n phi_20 = np.rad2deg(np.angle(U[6, 6]))\n\n phi_cond = (phi_11 - phi_01 - phi_10 + phi_00) % 360 # still the right formula independently from phi_00\n\n return phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond", "def pertPole(siso):\n p = np.roots(inv(ctrl.tf(1,1,1)-siso*sisoAttack(siso)).den[0][0])\n normP = (p*p.conjugate())**.5\n return p, normP", "def calc_torsion_psi(self):\n next_res = self.get_offset_residue(1)\n if next_res is None:\n return None\n\n aN = self.get_atom('N')\n aCA = self.get_atom('CA')\n aC = self.get_atom('C')\n naN = next_res.get_atom('N')\n return AtomMath.calc_torsion_angle(aN, aCA, aC, naN)", "def graphite_ocp_PeymanMPM(sto):\n\n u_eq = (\n 0.063\n + 0.8 * np.exp(-75 * (sto + 0.001))\n - 0.0120 * np.tanh((sto - 0.127) / 0.016)\n - 0.0118 * np.tanh((sto - 0.155) / 0.016)\n - 0.0035 * np.tanh((sto - 0.220) / 0.020)\n - 0.0095 * np.tanh((sto - 0.190) / 0.013)\n - 0.0145 * np.tanh((sto - 0.490) / 0.020)\n - 0.0800 * np.tanh((sto - 1.030) / 0.055)\n )\n\n return u_eq", "def psi_x(z, x, gamma):\n \n beta2 = 1-1/gamma**2\n beta = sqrt(beta2)\n \n alp = alpha(z, x, beta2) # Use approximate quatic formulas\n #alp = alpha_exact_case_B_brentq(z, x, beta) # Use numerical root finder\n \n kap = 2*(alp - z)/beta \n # kap = sqrt(x**2 + 4*(1+x) * sin(alp)**2) # kappa(z, x, beta2) inline\n \n sin2a = sin(2*alp)\n cos2a = cos(2*alp) \n \n arg2 = -4 * (1+x) / x**2\n F = my_ellipkinc(alp, arg2) \n E = my_ellipeinc(alp, arg2)\n \n T1 = (1/abs(x)/(1 + x) * ((2 + 2*x + x**2)*F - x**2*E))\n D = kap**2 - beta2 * (1 + x)**2 * sin2a**2\n T2 = ((kap**2 - 2*beta2*(1+x)**2 + beta2*(1+x)*(2 + 2*x + x**2)*cos2a)/ beta/ (1+x)/ D)\n T3 = -kap * sin2a / D\n T4 = kap * beta2 * (1 + x) * sin2a * cos2a / D\n \n out = (T1 + T2 + T3 + T4)\n \n return out", "def get_stream_function_vortex(strength, xv, yv, X, Y):\r\n psi = strength / (4 * math.pi) * numpy.log((X - xv)**2 + (Y - yv)**2)\r\n \r\n return psi", "def psi(self, i):\n res = self.all_residues[i]\n\n if i == len(self.all_residues) or not self.connected_to_next(i):\n return 0.0\n\n try:\n n = res['N'].get_vector()\n ca = res['CA'].get_vector()\n c = res['C'].get_vector()\n res_plus_one = self.all_residues[i + 1]\n\n nn = res_plus_one['N'].get_vector()\n psi = calc_dihedral(n, ca, c, nn)\n return psi\n except Exception:\n print \"Could not get psi for \"+repr(i)\n raise LookupError", "def calc(self):\n\n # the following if query ensures that volume- and interaction-terms\n # are only calculated if tau > 0.\n # (to avoid nan-values from invalid function-evaluations)\n\n if self.V.tau.shape == (1,):\n Isurf = self.surface()\n # differentiation for non-existing canopy, as otherwise NAN values\n if self.V.tau > 0.:\n Ivol = self.volume()\n if self.int_Q is True:\n Iint = self.interaction()\n else:\n Iint = np.array([0.])\n else:\n Ivol = np.array([0.])\n Iint = np.array([0.])\n else:\n # calculate 
surface-term (valid for any tau-value)\n Isurf = self.surface()\n\n # store initial parameter-values\n old_t_0 = self.t_0\n old_p_0 = self.p_0\n old_t_ex = self.t_ex\n old_p_ex = self.p_ex\n\n old_tau = self.V._get_tau()\n old_omega = self.V._get_omega()\n old_NN = self.SRF._get_NormBRDF()\n\n # set mask for tau > 0.\n mask = old_tau > 0.\n valid_index = np.where(mask)\n inval_index = np.where(~mask)\n\n # set parameter-values to valid values for calculation\n self.t_0 = old_t_0[valid_index[0]]\n self.p_0 = old_p_0[valid_index[0]]\n self.t_ex = old_t_ex[valid_index[0]]\n self.p_ex = old_p_ex[valid_index[0]]\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically adds an axis to the arrays!\n self.V.tau = np.squeeze(old_tau[valid_index[0]])\n if np.array(self.V.omega).size != 1:\n self.V.omega = np.squeeze(old_omega[valid_index[0]])\n if np.array(self.SRF.NormBRDF).size != 1:\n self.SRF.NormBRDF = np.squeeze(old_NN[valid_index[0]])\n\n # calculate volume and interaction term where tau-values are valid\n _Ivol = self.volume()\n if self.int_Q is True:\n _Iint = self.interaction()\n else:\n _Iint = np.full_like(self.t_0, 0.)\n\n # reset parameter values to old values\n self.t_0 = old_t_0\n self.p_0 = old_p_0\n self.t_ex = old_t_ex\n self.p_ex = old_p_ex\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically add an axis to the arrays!\n self.V.tau = np.squeeze(old_tau)\n self.V.omega = np.squeeze(old_omega)\n self.SRF.NormBRDF = np.squeeze(old_NN)\n\n # combine calculated volume-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n Ivol = np.ones_like(self.t_0)\n Ivol[valid_index[0]] = _Ivol\n Ivol[inval_index[0]] = np.ones_like(Ivol[inval_index[0]]) * 0.\n\n # combine calculated interaction-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n if self.int_Q is True:\n Iint = np.ones_like(self.t_0)\n Iint[valid_index[0]] = _Iint\n Iint[inval_index[0]] = np.ones_like(Iint[inval_index[0]]) * 0.\n else:\n Iint = np.full_like(self.t_0, 0.)\n\n return Isurf + Ivol + Iint, Isurf, Ivol, Iint", "def _get_phi(self):\n phi = self.phi(self._data[SoilProperty.N60])\n ### Ok let's remove for clay\n if self.is_clayey():\n phi=0 #very small value for plasix:::@TODO 0.01\n return phi", "def exner_function(pressure, reference_pressure=P0):\n return (pressure / reference_pressure)**kappa", "def two_body_old(sys, psi):\n # psi = np.reshape(psi,\n # (fci.cistring.num_strings(sys.nsites, sys.nup), fci.cistring.num_strings(sys.nsites, sys.ndown)))\n D = 0.\n for i in range(sys.nsites):\n w = (i + 1) % sys.nsites\n v = (i - 1) % sys.nsites\n\n D += harmonic.compute_inner_product(psi, sys.nsites, (sys.nup, sys.ndown), [i, w, i, i], [1, 0, 1, 0], [1, 1, 0, 0])\n\n D += harmonic.compute_inner_product(psi, sys.nsites, (sys.nup, sys.ndown), [i, i, i, w], [1, 0, 1, 0], [1, 1, 0, 0])\n\n D -= harmonic.compute_inner_product(psi, sys.nsites, (sys.nup, sys.ndown), [v, i, i, i], [1, 0, 1, 0], [1, 1, 0, 0])\n\n D -= harmonic.compute_inner_product(psi, sys.nsites, (sys.nup, sys.ndown), [i, i, v, i], [1, 0, 1, 0], [1, 1, 0, 0])\n\n return D.conj()", "def old_psi_x(z, x, beta):\n \n beta2 = beta**2\n \n alp = old_alpha(z, x, beta2)\n kap = sqrt(x**2 + 4*(1+x) * sin(alp)**2) # kappa(z, x, beta2) inline\n \n sin2a = sin(2*alp)\n cos2a = cos(2*alp) \n \n arg2 = -4 * (1+x) / x**2\n \n ellipkinc = ss.ellipkinc(alp, arg2) \n ellipeinc = ss.ellipeinc(alp, arg2)\n \n T1 = 
(1/abs(x)/(1 + x) * ((2 + 2*x + x**2) * ellipkinc - x**2 * ellipeinc))\n D = kap**2 - beta2 * (1 + x)**2 * sin2a**2\n T2 = ((kap**2 - 2*beta2 * (1+x)**2 + beta2 * (1+x) * (2 + 2*x + x**2) * cos2a)/ beta/ (1+x)/ D)\n T3 = -kap * sin2a / D\n T4 = kap * beta2 * (1 + x) * sin2a * cos2a / D\n T5 = 1 / abs(x) * ellipkinc # psi_phi without e/rho**2 factor\n out = (T1 + T2 + T3 + T4) - 2 / beta2 * T5\n\n \n return out", "def psi_xm(E_val,lec,lam):\n x = np.linspace(0, xm, n+1) # grid in the x-direction\n y = np.zeros(n+1) # wave-function in individual points\n # initial conditions\n y[0] = 0\n y[1] = 1.0\n #\n for i in range(1,n):\n y[i + 1] = (2 - 5 * dx2 * f(i, E_val,lec,lam) / 6) * y[i] - (1 + dx2 * f(i-1, E_val,lec,lam) / 12) * y[i - 1]\n y[i + 1] /= (1 + dx2 * f(i+1, E_val,lec,lam) / 12)\n return y[n]-asymptotic_boundary(-E_val)", "def Phi(l,m,theta,phi):\n Psilm_th, Psilm_ph=Psi(l,m,theta,phi);\n Philm_th=-Psilm_ph;\n Philm_ph=+Psilm_th;\n return Philm_th, Philm_ph", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def psi(a):", "def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib += 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes", "def getPhi(mass,resonance):\n return numpy.arctan((resonance.r0*resonance.w0)/(mass**2-resonance.w0**2)) #need to make this arccotan? 
invert args", "def stoichiometry(self, fluid):\n air = self.air_alias.val\n fuel = self.fuel_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n\n ###################################################################\n # calculate fuel and air mass flow\n m_fuel = 0\n for i in self.inl:\n m_fuel += i.m.val_SI * i.fluid.val[fuel]\n\n m_air = 0\n for i in self.inl:\n m_air += i.m.val_SI * i.fluid.val[air]\n\n m_air_min = self.air_min * m_fuel\n\n ###################################################################\n # calculate lambda if not specified\n if not self.lamb.is_set:\n self.lamb.val = m_air / (self.air_min * m_fuel)\n\n ###################################################################\n # calculate excess fuel if lambda is smaller than 1\n m_fuel_exc = 0\n if self.lamb.val < 1:\n m_fuel_exc = m_fuel - m_air / (self.lamb.val * self.air_min)\n\n ###################################################################\n # equation for air\n if fluid == air:\n if self.lamb.val >= 1:\n dm = -m_air_min\n else:\n dm = -m_air\n\n ###################################################################\n # equation for fuel\n elif fluid == fuel:\n dm = -(m_fuel - m_fuel_exc)\n\n ###################################################################\n # equation for flue gas\n elif fluid == flue_gas:\n dm = m_air_min + m_fuel\n\n ###################################################################\n # equation for other components\n else:\n dm = 0\n\n res = dm\n for i in self.inl:\n res += i.fluid.val[fluid] * i.m.val_SI\n for o in self.outl:\n res -= o.fluid.val[fluid] * o.m.val_SI\n return res", "def Phi(self):\n return self.E0*np.sqrt(self.f1*self.f2*self.f3)*self.dT**2*self.dp.phi0", "def energy_PhotonFormula(wave=1.0,energy=0.00):\n global r,c,h\n print(\"Enerji var ise lutfen giriniz.\")\n if energy != 0:\n energy = energy\n else:\n energy=h*(c/wave)\n getit =str(input(\"Dalga boyunu istiyorsaniz d,enerji istiyorsaniz bos birakin.\"))\n if getit == 'd':\n return ('%.2E' % Decimal(str(energy/(h*c))))\n elif getit ==\"\":\n ('%.2E' % Decimal(str(energy)))\n print(\"Yanlis girdi.Yeniden dene.\")\n return energy_PhotonFormula(wave)", "def solucaoEspacoPontoX(arq,t,x):\n \n parametros = open(arq,'r')\n lista = []\n for linha in parametros:\n valores = linha.split()\n lista.append( valores[1] )\n Ks = float(lista[0]) # Conductibilidade na fase solida\n Kl = float(lista[1]) # Conductibilidade na fase liquida\n Tf = float(lista[2]) # temperatura de mudanca de fase\n Tinf = float(lista[3]) # Temperatura inicial na fase liquida T0\n Tw = float(lista[4]) # Temperatura na fronteira no instante inicial\n L = float(lista[5]) # calor latente\n cs = float(lista[6]) # calor especifico na zona solida\n cl = float(lista[7]) # calor especifico na zona liquida\n cf = float(lista[8]) # calor especifico na zona de mudanca de fase\n p = float(lista[9]) # pho = massa especifica\n parametros.close()\n \n lam = 0.5064 # chute inicial\n xx = x\n lamb = calculaLambda(funcao,lam,Tinf,Tf,Ks,Kl,L,cs,Tw)\n frente = X_espaco(lamb,Ks,t)\n if (frente > xx):\n T = temperaturaZonaSolida_espaco(Tf,Ks,lamb,xx,t,Tw)\n elif (frente < xx):\n T = temperaturaZonaLiquida_espaco(Tinf,Tf,Ks,Kl,lamb,xx,t)\n else:\n T = Tf\n return T", "def get_sol(self):", "def pt_operators(N, L, J, theta, f, phi, band_idxs, deltaE, exact=False, qs=None, verbose=False, **kwargs):\n if qs is None:\n qs = range(N)\n BandIdxs = band_idxs\n DeltaE = deltaE\n if 'theta0' in kwargs:\n theta0 = kwargs['theta0']\n else:\n theta0 = theta\n H0Op = 
pf.ParafermionicChainOp(N, L, J, theta0, 0.0, 0.0, q=0) # we get MPO object for full unperturbed Hamiltonian (note the same in each sector)\n HfOps = []\n for q in qs:\n if 'exclude_side' in kwargs:\n if kwargs['exclude_side'] == 'left':\n fs = np.ones(L)*f\n fs[0] = 0.0\n Hf = pf.ParafermionicChainOp(N, L, J, theta, fs, phi, q=q)\n elif kwargs['exclude_side'] == 'right':\n fs = np.ones(L)*f\n fs[-1] = 0.0\n Hf = pf.ParafermionicChainOp(N, L, J, theta, fs, phi, q=q)\n elif kwargs['exclude_side'] == 'neither':\n fs = np.ones(L)*f\n fs[0] = 0.0\n fs[-1] = 0.0\n Hf = pf.ParafermionicChainOp(N, L, J, theta, fs, phi, q=q)\n else:\n raise Exception('\\'exlude_side\\' argument should be either left or right')\n else:\n Hf = pf.ParafermionicChainOp(N, L, J, theta, f, phi, q=q)\n Hf.add(H0Op, c1=1.0, c2=-1.0, inplace=True, compress=False)\n HfOps.append(Hf)\n [Partitions, H0Energies] = H0Op.get_bands_and_energies() # get all the partitions and energies of each\n BandEnergy = H0Energies[BandIdxs[0]] # get the energy of the band we start from, this is E0\n BandPartitions = list(map(lambda x: Partitions[x], BandIdxs)) # get the\n FullBand = np.vstack(list(map(lambda x: pfcy.GetFullBandDW(BandPartitions[x]), range(len(BandIdxs)))))\n FullBandDim = len(FullBand)\n [NeighbouringBands,] = np.where(np.abs(H0Energies - BandEnergy) < DeltaE) # find other bands within deltaE in energy\n FullSubspace = np.copy(FullBand)\n for NeighbouringBand in NeighbouringBands:\n if NeighbouringBand not in BandIdxs:\n FullSubspace = np.vstack((FullSubspace, pfcy.GetFullBandDW(Partitions[NeighbouringBand])))\n FullSubspaceDim = FullSubspace.shape[0]\n if verbose: print('Full subspace dim: ' + str(FullSubspaceDim) + '.')\n x = np.arange(FullSubspaceDim)\n I = sps.diags(np.ones(FullSubspaceDim), 0)\n P0 = sps.diags(np.piecewise(x, [x < FullBandDim, x >= FullBandDim], [1.0, 0.0]), 0)\n Q0 = sps.diags(np.piecewise(x, [x < FullBandDim, x >= FullBandDim], [0.0, 1.0]), 0)\n s = time.time()\n H0 = H0Op.mats_subspace(FullSubspace)\n e = time.time()\n if verbose: print('Time taken to calculate H0 matrix: ' + str(e-s) + ' seconds.')\n\n s = time.time()\n Hfs = list(map(lambda x : HfOps[x].mats_subspace(FullSubspace), qs))\n e = time.time()\n if verbose: print('Time taken to calculate V matrices: ' + str(e-s) + ' seconds.')\n denominators = (BandEnergy - H0.diagonal()[FullBandDim:])\n if len(np.where(denominators == 0)[0]) > 0:\n return None\n Q = sps.diags(np.hstack([np.zeros(FullBandDim),np.ones(FullSubspaceDim-FullBandDim)/denominators]), 0)\n\n if exact:\n Offset = np.sum(map(lambda x: len(pfcy.GetFullBandDW(Partitions[x])), range(min(BandIdxs))))\n # for debugging purposes, calculate some of full spectrum exactly, can be time consuming\n FullEs = list(map(lambda x: pf.Diagonalise(N, L, J, theta, f, phi, q=x, k=Offset + FullBandDim), qs))\n FullEs = list(map(lambda x: FullEs[x][0][Offset:(Offset+FullBandDim)], qs))\n else:\n FullEs = None\n\n return [P0, Q, H0, Hfs, FullBandDim, BandEnergy, FullEs]", "def calc_eta_FC(Q_load_W, Q_design_W, phi_threshold, approach_call):\n phi = 0.0\n\n ## Approach A - NREL Approach\n if approach_call == \"A\":\n\n phi = float(Q_load_W) / float(Q_design_W)\n eta_max = 0.425 # from energy.gov\n\n if phi >= phi_threshold: # from NREL-Shape\n eta_el = eta_max - ((1 / 6.0 * eta_max) / (1.0 - phi_threshold)) * abs(phi - phi_threshold)\n\n if phi < phi_threshold:\n if phi <= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3 * (phi / (phi_threshold * 118 / 520.0))\n\n if phi < 0.5 * phi_threshold and 
phi >= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3.0 + \\\n eta_max * 0.25 * (phi - phi_threshold * 118 / 520.0) / (phi_threshold * (0.5 - 118 / 520.0))\n\n if phi > 0.5 * phi_threshold and phi < phi_threshold:\n eta_el = eta_max * (2 / 3.0 + 0.25) + \\\n 1 / 12.0 * eta_max * (phi - phi_threshold * 0.5) / (phi_threshold * (1 - 0.5))\n\n eta_therm_max = 0.45 # constant, after energy.gov\n\n if phi < phi_threshold:\n eta_therm = 0.5 * eta_therm_max * (phi / phi_threshold)\n\n else:\n eta_therm = 0.5 * eta_therm_max * (1 + eta_therm_max * ((phi - phi_threshold) / (1 - phi_threshold)))\n\n ## Approach B - Empiric Approach\n if approach_call == \"B\":\n\n if Q_design_W > 0:\n phi = float(Q_load_W) / float(Q_design_W)\n\n else:\n phi = 0\n\n eta_el_max = 0.39\n eta_therm_max = 0.58 # * 1.11 as this source gives eff. of HHV\n eta_el_score = -0.220 + 5.277 * phi - 9.127 * phi ** 2 + 7.172 * phi ** 3 - 2.103 * phi ** 4\n eta_therm_score = 0.9 - 0.07 * phi + 0.17 * phi ** 2\n\n eta_el = eta_el_max * eta_el_score\n eta_therm = eta_therm_max * eta_therm_score\n\n if phi < 0.2:\n eta_el = 0\n\n return eta_el, eta_therm", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def calculate_com(self):\n vr, vphi, gamma = self.emitter.get_velocities()\n u1, u3, gamma2 = self.emitter.get_rotation_velocities()\n math_v, gamma3 = self.emitter.get_momentum_velocity()\n rho = self.emitter.rho\n\n alpha = 5/2 * self.emitter.get_s() / rho**2\n\n E = self._E(self.chi, self.eta, self.iota, gamma, vphi, gamma2, u1, u3, math_v, gamma3)\n L = self._L(self.chi, self.eta, self.iota, gamma, vphi, gamma2, u1, u3, math_v, gamma3)\n Q = self._Q(self.chi, self.eta, L)\n\n return E, L, Q", "def proximal(self):\n 
functional = self\n\n class EntRegOptTransProximal(Operator):\n\n \"\"\"Proximal operator of entropy regularized optimal transport.\n\n The prox is given by::\n\n prox_[gamma*T_eps](mu1) = arg min_x (T_epsilon(mu0, x) +\n 1/(2*gamma) ||x - mu1||^2_2)\n \"\"\"\n\n def __init__(self, sigma):\n \"\"\"Initialize a new instance.\n\n Parameters\n ----------\n sigma : positive float\n \"\"\"\n self.sigma = float(sigma)\n super().__init__(domain=functional.domain,\n range=functional.domain, linear=False)\n\n # Setting up parameters\n self.const = 1 / (functional.epsilon * sigma)\n\n def _call(self, x):\n \"\"\"Apply the operator to ``x``.\"\"\"\n u = functional.tmp_u_prox\n v = functional.tmp_v_prox\n\n # Running generalized Sinkhorn iterations\n for j in range(functional.niter):\n # Safe-guarded u-update, to avoid divide-by-zero error.\n u_old = u.copy()\n tmp1 = functional.K_op(v)\n if np.min(tmp1) < 1e-30 or np.max(tmp1) > 1e+50:\n print('Numerical instability, truncation in Transport prox (Kv)',\n str(np.min(tmp1)), str(np.max(tmp1)))\n\n tmp = np.fmax(tmp1, 1e-30)\n\n\n u = functional.mu0 / tmp\n if np.min(u) < 1e-30 or np.max(u) > 1e+50:\n print('u (min/max)', str(np.min(u)), str(np.max(u)))\n\n # Safe-guarded v-update, to avoid divide-by-zero error.\n v_old = v.copy()\n\n tmp3 = functional.K_op_adjoint(u)\n if np.min(tmp3) < 1e-30 or np.max(tmp3) > 1e+50:\n print('Truncation in Transport prox (KTu)',\n str(np.min(tmp3)), str(np.max(tmp3)))\n print('u (min/max)', str(np.min(u)), str(np.max(u)))\n\n tmp4 = (self.const * tmp3 * np.exp(self.const * x))\n\n if np.min(tmp4) < 1e-30 or np.max(tmp4) > 1e+200:\n print('Argument in lambdert omega (min/max)',\n str(np.min(tmp4)), str(np.max(tmp4)))\n\n v = np.exp(self.const * x - lambertw_fulfix(tmp4))\n\n v1 = np.exp(self.const * x - scipy.special.lambertw(\n tmp4))\n if (v-v1).norm() > 1e-10:\n print('diff pga ny lambderw omega funciton',\n str((v-v1).norm()))\n print('v (min/max)', str(np.min(v)), str(np.max(v)))\n print('Argument in lambdert omega (min/max)',\n str(np.min(tmp4)), str(np.max(tmp4)))\n\n # If the updates in both u and v are small, break the loop\n if ((np.log(v)-np.log(v_old)).norm() < 1e-8 and\n (np.log(u)-np.log(u_old)).norm() < 1e-8):\n break\n\n # Store the u and v in the internal temporary variables of the\n # functional\n functional.tmp_u_prox = u\n functional.tmp_v_prox = v\n\n return x - self.sigma * functional.epsilon * np.log(v)\n\n return EntRegOptTransProximal", "def calc_ti(self):\n m = 0\n for i in self.inl:\n m += i.m.val_SI * i.fluid.val[self.fuel_alias.val]\n\n for o in self.outl:\n m -= o.m.val_SI * o.fluid.val[self.fuel_alias.val]\n\n return m * self.lhv", "def phi(self):\n return (np.sum(self.diameters**self.ndim)*np.pi / (2*self.ndim))", "def f(self,y,psi):\r\n\r\n #1. check that number of params is consistent\r\n assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions'\r\n assert psi.shape[1] == 3, 'inconsistent parameter dimensions'\r\n\r\n #2. exponentiate the a and b (positive!)\r\n mpsi = psi.copy()\r\n\r\n #3. 
transform data\r\n z = y.copy()\r\n for i in range(len(mpsi)):\r\n a,b,c = mpsi[i]\r\n z += a*np.tanh(b*(y+c))\r\n return z", "def num_solution(self):\r\n\r\n m = float(self.mass) / self.nu_m\r\n c = self.light_vel\r\n p1 = 0.0\r\n x1 = 0.0\r\n self.xn_track.append(x1)\r\n self.vn.append(0.0)\r\n e = m * c * c\r\n self.en.append(e)\r\n for i in range(1, len(self.obs.obt_g)):\r\n dt = self.t[i] - self.t[i - 1]\r\n ddt = float(self.obs.obt[i] - self.obs.obt[i - 1])\r\n qe = 1 / self.nu_m * (self.nu_t * self.nu_t / self.nu_x) \\\r\n * 1.0 / float(self.size_tick * self.size_tick)\r\n\r\n # print \"qE=\", qe\r\n\r\n p2 = p1 + qe * dt\r\n self.vn.append(p2 / math.sqrt(m ** 2 + (p2 / c) ** 2))\r\n e = e + qe * (self.x[i] - self.x[i - 1])\r\n self.en.append(e)\r\n v = p2 / math.sqrt(m ** 2 + (p2 / c) ** 2)\r\n x2 = x1 + v * dt\r\n self.xn_track.append(x2)\r\n p1 = p2\r\n x1 = x2\r\n print 'Numerical solution of the differential equation of motion'", "def calculate(self) -> float:", "def quantum_theta(self):\n return quantum_theta(self.T_e, self.n_e)", "def sol_p(t, p0, s0, u0, alpha, beta, gamma, eta, delta):\n u = sol_u(t, u0, alpha, beta)\n s = sol_s(t, s0, u0, alpha, beta, gamma)\n exp_gt = np.exp(-delta * t)\n p = p0 * exp_gt + eta / (delta - gamma) * (\n s - s0 * exp_gt - beta / (delta - beta) * (u - u0 * exp_gt - alpha / delta * (1 - exp_gt))\n )\n return p, s, u", "def return_expression(b, p):\n # Properties of Gases and Liquids, Eq. 9-5.14\n # and Eq. 10-6.4\n ViscosityWilke.build_phi_ij(b, p)\n if not hasattr(b, \"_therm_cond_phase_comp\"):\n b._make_therm_cond_phase_comp() # pylint: disable=protected-access\n\n # Properties of Gases and Liquids, Eq. 10-6.2\n return sum(\n [\n b.mole_frac_phase_comp[p, i]\n * b._therm_cond_phase_comp[p, i] # pylint: disable=protected-access\n / sum(\n [\n b.mole_frac_phase_comp[p, j] * b.visc_d_phi_ij[i, j]\n for j in b.components_in_phase(p)\n ]\n )\n for i in b.components_in_phase(p)\n ]\n )", "def calculation_of_propagation(self): \n \n prop = PopulationPropagator(world.time, rate_matrix=world.KK)\n \n pop_ini = numpy.array([1.0, 0.0])\n \n pop_t = prop.propagate(pop_ini)\n \n sta = world.subtime\n \n U = prop.get_PropagationMatrix(sta)\n \n pop_sub = numpy.zeros((2,sta.length))\n \n for i in range(sta.length):\n pop_sub[:,i] = numpy.dot(U[:,:,i],pop_ini) \n \n world.pop_t = pop_t\n world.pop_sub = pop_sub", "def evf(self, photo, phi, ta, psi_l, qa, tl, ci, lai, ared, **kwargs):\n\t return max(lai*(1./(self.gsw(photo, phi, ta, psi_l, qa, tl, ci, ared, **kwargs)*R*ta/P_ATM*1000000.)+1./(self.GA*1000.))**(-1.)\\\n\t *RHO_A/RHO_W*(self.qi(tl, psi_l)-qa), 0.)", "def p(e, t):\n return b * e ** 2", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB 
*= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def equations(self):\n k = 0\n ######################################################################\n # equations for fluid balance\n self.residual[k:k + self.num_nw_fluids] = self.fluid_func()\n k += self.num_nw_fluids\n\n ######################################################################\n # equations for mass flow balance\n self.residual[k] = self.mass_flow_func()\n k += 1\n\n ######################################################################\n # equations for specified heta transfer\n if self.Q.is_set:\n self.residual[k] = self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) - self.Q.val\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio\n if self.pr.is_set:\n self.residual[k] = (\n self.inl[0].p.val_SI * self.pr.val - self.outl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified zeta\n if self.zeta.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(zeta='zeta')\n k += 1\n\n ######################################################################\n # equation for specified hydro-group paremeters\n if self.hydro_group.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n # hazen williams equation\n if self.hydro_group.method == 'HW':\n func = self.hw_func\n # darcy friction factor\n else:\n func = self.darcy_func\n self.residual[k] = func()\n k += 1\n\n ######################################################################\n # additional equations\n self.additional_equations(k)", "def calculate_Sio(tp, c, T, ib, ik, once_called, kgrid, cbm_vbm, epsilon_s, epsilon_inf):\n S_i = [np.array([1e-32, 1e-32, 1e-32]), np.array([1e-32, 1e-32, 1e-32])]\n S_i_th = [np.array([1e-32, 1e-32, 1e-32]), np.array([1e-32, 
1e-32, 1e-32])]\n S_o = [np.array([1e-32, 1e-32, 1e-32]), np.array([1e-32, 1e-32, 1e-32])]\n S_o_th = [np.array([1e-32, 1e-32, 1e-32]), np.array([1e-32, 1e-32, 1e-32])]\n\n k = kgrid[tp][\"norm(k)\"][ib][ik]\n a = kgrid[tp][\"a\"][ib][ik]\n c_ = kgrid[tp][\"c\"][ib][ik]\n f = kgrid[tp][\"f\"][c][T][ib][ik]\n f_th = kgrid[tp][\"f_th\"][c][T][ib][ik]\n N_POP = kgrid[tp][\"N_POP\"][c][T][ib][ik]\n\n for j, X_Epm in enumerate([\"X_Eplus_ik\", \"X_Eminus_ik\"]):\n if tp == \"n\" and X_Epm == \"X_Eminus_ik\" and kgrid[tp][\"energy\"][ib][ik] - hbar * \\\n kgrid[tp][\"W_POP\"][ib][ik] < cbm_vbm[tp][\"energy\"]:\n continue\n if tp == \"p\" and X_Epm == \"X_Eplus_ik\" and kgrid[tp][\"energy\"][ib][ik] + hbar * \\\n kgrid[tp][\"W_POP\"][ib][ik] > cbm_vbm[tp][\"energy\"]:\n continue\n counted = len(kgrid[tp][X_Epm][ib][ik])\n for X_ib_ik in kgrid[tp][X_Epm][ib][ik]:\n X, ib_pm, ik_pm = X_ib_ik\n k_pm = kgrid[tp][\"norm(k)\"][ib_pm][ik_pm]\n abs_kdiff = abs(k_pm - k)\n if abs_kdiff < 1e-4 or k<1e-4 or k_pm<1e-4:\n # avoid rate blow-up (e.g. due to self-scattering)\n counted -= 1\n continue\n if abs(kgrid[tp]['energy'][ib_pm][ik_pm] - \\\n kgrid[tp]['energy'][ib][ik]) < \\\n hbar * kgrid[tp][\"W_POP\"][ib][ik] / 2.0:\n counted -= 1\n continue\n g_pm = kgrid[tp][\"g\"][c][T][ib_pm][ik_pm]\n g_pm_th = kgrid[tp][\"g_th\"][c][T][ib_pm][ik_pm]\n v_pm = kgrid[tp][\"norm(v)\"][ib_pm][ik_pm] / sq3 # 3**0.5 is to treat each direction as 1D BS\n a_pm = kgrid[tp][\"a\"][ib_pm][ik_pm]\n c_pm = kgrid[tp][\"c\"][ib_pm][ik_pm]\n if tp == \"n\":\n f_pm = kgrid[tp][\"f\"][c][T][ib_pm][ik_pm]\n f_pm_th = kgrid[tp][\"f_th\"][c][T][ib_pm][ik_pm]\n else:\n f_pm = 1 - kgrid[tp][\"f\"][c][T][ib_pm][ik_pm]\n f_pm_th = 1 - kgrid[tp][\"f_th\"][c][T][ib_pm][ik_pm]\n A_pm = a * a_pm + c_ * c_pm * (k_pm ** 2 + k ** 2) / (2 * k_pm * k)\n beta_pm = (e ** 2 * kgrid[tp][\"W_POP\"][ib_pm][ik_pm]) / (4 * pi * hbar * v_pm) * \\\n (1 / (epsilon_inf * epsilon_0) - 1 / (epsilon_s * epsilon_0)) * 6.2415093e20\n if not once_called:\n lamb_opm = beta_pm * (\n A_pm ** 2 * log((k_pm + k) / (abs_kdiff)) - A_pm * c_ * c_pm - a * a_pm * c_ * c_pm)\n # because in the scalar form k+ or k- is supposed to be unique, here we take average\n S_o[j] += (N_POP + j + (-1) ** j * f_pm) * lamb_opm\n S_o_th[j] += (N_POP + j + (-1) ** j * f_pm_th) * lamb_opm\n\n lamb_ipm = beta_pm * (\n (k_pm**2 + k**2) / (2*k*k_pm) * A_pm**2 *\\\n log((k_pm + k) / (abs_kdiff)) - A_pm**2 - c_**2 * c_pm** 2 / 3.0)\n S_i[j] += (N_POP + (1 - j) + (-1)**(1 - j) * f) * lamb_ipm * g_pm\n S_i_th[j] += (N_POP + (1 - j) + (-1)**(1 - j) * f_th) * lamb_ipm * g_pm_th\n if counted > 0:\n S_i[j] /= counted\n S_i_th[j] /= counted\n S_o[j] /= counted\n S_o_th[j] /= counted\n return [sum(S_i), sum(S_i_th), sum(S_o), sum(S_o_th)]", "def solve(self):\n wort_gravity = self.property('start_gravity').to('sg') +\\\n (self.total_points().to('points') / self.property('wort_volume').to('gal') / 1000.0)\n self.property('wort_gravity', Quantity(wort_gravity, 'sg'))", "def qi(self, tl, psi_l):\n\t try: \n\t ans = .622*esat(tl)/P_ATM*exp(psi_l*1000000.*VW/R/tl)\n\t except OverflowError:\n\t ans = 0.\n\t return ans", "def psi_xf(self, ev, gp, psi_l):\n\t return ev*(1. 
- self.F_CAP)/(lai*gp) + psi_l", "def calP(self):\n N = len(self.listOfParticles)\n m = self.listOfParticles[0].m\n vsum = 0\n for particle in self.listOfParticles:\n vsum += particle.V.len()\n A = np.pi*self.R**2\n F = 0.5 * A * (2*self.R) * m * N * vsum**2\n return F", "def calculate_axial_transport(my_cell, t):\n\n phi_si, phi_se, phi_di, phi_de, phi_sm, phi_dm = my_cell.membrane_potentials()\n\n j_Na_diff_i = my_cell.j_k_diff(my_cell.D_Na, my_cell.lamda_i, my_cell.Na_si, my_cell.Na_di)*my_cell.A_i*N_A\n Na_akkum_diff_i = scipy.integrate.cumtrapz(j_Na_diff_i, t, initial=0)\n\n j_Na_drift_i = my_cell.j_k_drift(my_cell.D_Na, my_cell.Z_Na, my_cell.lamda_i, my_cell.Na_si, my_cell.Na_di, phi_si, phi_di)*my_cell.A_i*N_A\n Na_akkum_drift_i = scipy.integrate.cumtrapz(j_Na_drift_i, t, initial=0)\n\n j_K_diff_i = my_cell.j_k_diff(my_cell.D_K, my_cell.lamda_i, my_cell.K_si, my_cell.K_di)*my_cell.A_i*N_A\n K_akkum_diff_i = scipy.integrate.cumtrapz(j_K_diff_i, t, initial=0)\n\n j_K_drift_i = my_cell.j_k_drift(my_cell.D_K, my_cell.Z_K, my_cell.lamda_i, my_cell.K_si, my_cell.K_di, phi_si, phi_di)*my_cell.A_i*N_A\n K_akkum_drift_i = scipy.integrate.cumtrapz(j_K_drift_i, t, initial=0)\n\n j_Cl_diff_i = my_cell.j_k_diff(my_cell.D_Cl, my_cell.lamda_i, my_cell.Cl_si, my_cell.Cl_di)*my_cell.A_i*N_A\n Cl_akkum_diff_i = scipy.integrate.cumtrapz(j_Cl_diff_i, t, initial=0)\n\n j_Cl_drift_i = my_cell.j_k_drift(my_cell.D_Cl, my_cell.Z_Cl, my_cell.lamda_i, my_cell.Cl_si, my_cell.Cl_di, phi_si, phi_di)*my_cell.A_i*N_A\n Cl_akkum_drift_i = scipy.integrate.cumtrapz(j_Cl_drift_i, t, initial=0)\n\n j_Ca_diff_i = my_cell.j_k_diff(my_cell.D_Ca, my_cell.lamda_i, my_cell.free_Ca_si, my_cell.free_Ca_di)*my_cell.A_i*N_A\n Ca_akkum_diff_i = scipy.integrate.cumtrapz(j_Ca_diff_i, t, initial=0)\n\n j_Ca_drift_i = my_cell.j_k_drift(my_cell.D_Ca, my_cell.Z_Ca, my_cell.lamda_i, my_cell.free_Ca_si, my_cell.free_Ca_di, phi_si, phi_di)*my_cell.A_i*N_A\n Ca_akkum_drift_i = scipy.integrate.cumtrapz(j_Ca_drift_i, t, initial=0)\n\n j_e_diff_i = (j_Na_diff_i + j_K_diff_i + 2*j_Ca_diff_i - j_Cl_diff_i)\n j_e_drift_i = (j_Na_drift_i + j_K_drift_i + 2*j_Ca_drift_i - j_Cl_drift_i)\n e_akkum_diff_i = (Na_akkum_diff_i*my_cell.Z_Na + K_akkum_diff_i*my_cell.Z_K + Cl_akkum_diff_i*my_cell.Z_Cl + Ca_akkum_diff_i*my_cell.Z_Ca)\n e_akkum_drift_i = (Na_akkum_drift_i*my_cell.Z_Na + K_akkum_drift_i*my_cell.Z_K + Cl_akkum_drift_i*my_cell.Z_Cl + Ca_akkum_drift_i*my_cell.Z_Ca)\n\n j_Na_diff_e = my_cell.j_k_diff(my_cell.D_Na, my_cell.lamda_e, my_cell.Na_se, my_cell.Na_de)*my_cell.A_e*N_A\n Na_akkum_diff_e = scipy.integrate.cumtrapz(j_Na_diff_e, t, initial=0)\n\n j_Na_drift_e = my_cell.j_k_drift(my_cell.D_Na, my_cell.Z_Na, my_cell.lamda_e, my_cell.Na_se, my_cell.Na_de, phi_se, phi_de)*my_cell.A_e*N_A\n Na_akkum_drift_e = scipy.integrate.cumtrapz(j_Na_drift_e, t, initial=0)\n\n j_K_diff_e = my_cell.j_k_diff(my_cell.D_K, my_cell.lamda_e, my_cell.K_se, my_cell.K_de)*my_cell.A_e*N_A\n K_akkum_diff_e = scipy.integrate.cumtrapz(j_K_diff_e, t, initial=0)\n\n j_K_drift_e = my_cell.j_k_drift(my_cell.D_K, my_cell.Z_K, my_cell.lamda_e, my_cell.K_se, my_cell.K_de, phi_se, phi_de)*my_cell.A_e*N_A\n K_akkum_drift_e = scipy.integrate.cumtrapz(j_K_drift_e, t, initial=0)\n\n j_Cl_diff_e = my_cell.j_k_diff(my_cell.D_Cl, my_cell.lamda_e, my_cell.Cl_se, my_cell.Cl_de)*my_cell.A_e*N_A\n Cl_akkum_diff_e = scipy.integrate.cumtrapz(j_Cl_diff_e, t, initial=0)\n\n j_Cl_drift_e = my_cell.j_k_drift(my_cell.D_Cl, my_cell.Z_Cl, my_cell.lamda_e, my_cell.Cl_se, my_cell.Cl_de, phi_se, 
phi_de)*my_cell.A_e*N_A\n Cl_akkum_drift_e = scipy.integrate.cumtrapz(j_Cl_drift_e, t, initial=0)\n\n j_Ca_diff_e = my_cell.j_k_diff(my_cell.D_Ca, my_cell.lamda_e, my_cell.Ca_se, my_cell.Ca_de)*my_cell.A_e*N_A\n Ca_akkum_diff_e = scipy.integrate.cumtrapz(j_Ca_diff_e, t, initial=0)\n\n j_Ca_drift_e = my_cell.j_k_drift(my_cell.D_Ca, my_cell.Z_Ca, my_cell.lamda_e, my_cell.Ca_se, my_cell.Ca_de, phi_se, phi_de)*my_cell.A_e*N_A\n Ca_akkum_drift_e = scipy.integrate.cumtrapz(j_Ca_drift_e, t, initial=0)\n\n j_e_diff_e = (j_Na_diff_e + j_K_diff_e + 2*j_Ca_diff_e - j_Cl_diff_e)\n j_e_drift_e = (j_Na_drift_e + j_K_drift_e + 2*j_Ca_drift_e - j_Cl_drift_e)\n e_akkum_diff_e = (Na_akkum_diff_e*my_cell.Z_Na + K_akkum_diff_e*my_cell.Z_K + Cl_akkum_diff_e*my_cell.Z_Cl + Ca_akkum_diff_e*my_cell.Z_Ca)\n e_akkum_drift_e = (Na_akkum_drift_e*my_cell.Z_Na + K_akkum_drift_e*my_cell.Z_K + Cl_akkum_drift_e*my_cell.Z_Cl + Ca_akkum_drift_e*my_cell.Z_Ca)\n\n return j_e_drift_i, j_e_diff_i, e_akkum_drift_i, e_akkum_diff_i, Na_akkum_drift_i, Na_akkum_diff_i, K_akkum_drift_i, K_akkum_diff_i, Cl_akkum_drift_i, Cl_akkum_diff_i, Ca_akkum_drift_i, Ca_akkum_diff_i, \\\n j_e_drift_e, j_e_diff_e, e_akkum_drift_e, e_akkum_diff_e, Na_akkum_drift_e, Na_akkum_diff_e, K_akkum_drift_e, K_akkum_diff_e, Cl_akkum_drift_e, Cl_akkum_diff_e, Ca_akkum_drift_e, Ca_akkum_diff_e", "def phi(t, *args):\n # Unpacking data\n mu_1, pi_mu_2, distance, affine_transfo = args\n A, b = get_Ab(t)\n N = len(mu_1)\n assert len(mu_1) == len(pi_mu_2)\n # Computing value of objective function\n r = 0.\n for i in np.arange(N):\n r += distance(affine_transfo(A, b, mu_1[i]), pi_mu_2[i]) ** 2\n return r", "def psi_sx(z, x, beta):\n \n # beta**2 appears far more than beta. Use this in internal functions\n beta2 = beta**2\n\n alp = alpha(z, x, beta2)\n kap = sqrt(x**2 + 4*(1+x) * sin(alp)**2) # kappa(z, x, beta2) inline\n \n sin2a = sin(2*alp)\n cos2a = cos(2*alp) \n \n # psi_s calc\n out_psi_s = (cos2a - 1 / (1+x)) / (\n kap - beta * (1+x) * sin2a) \n \n # psi_x calc \n arg2 = -4 * (1+x) / x**2\n \n ellipeinc = ss.ellipeinc(alp, arg2)\n ellipkinc = ss.ellipkinc(alp, arg2) \n\n T1 = (1/abs(x)/(1 + x) * ((2 + 2*x + x**2) * ellipkinc - x**2 * ellipeinc))\n D = kap**2 - beta2 * (1 + x)**2 * sin2a**2\n T2 = ((kap**2 - 2*beta2 * (1+x)**2 + beta2 * (1+x) * (2 + 2*x + x**2) * cos2a)/ beta/ (1+x)/ D)\n T3 = -kap * sin2a / D\n T4 = kap * beta2 * (1 + x) * sin2a * cos2a / D\n T5 = 1 / abs(x) * ellipkinc # psi_phi without e/rho**2 factor\n out_psi_x = (T1 + T2 + T3 + T4) - 2 / beta2 * T5\n\n return out_psi_s, out_psi_x", "def approximate_Psi(L,N_scales,m): \r\n l_max = rough_l_max(L)\r\n (g, _, t) = filter_design(l_max, N_scales)\r\n arange = (0.0, l_max)\r\n \r\n c=[]\r\n for kernel in g:\r\n c.append(cheby_coeff(kernel, m, m+1, arange))\r\n\r\n # c2=[]\r\n # for s in range(N_scales+1):\r\n # c2.append(cheby_coeff2(m,s+1))\r\n\r\n psi=cheby_op2(L, c, arange)\r\n \r\n \r\n \r\n psi_inv=[]\r\n for i in range(N_scales+1):\r\n psi[i]=np.float32(psi[i]) # convert psi to float 32\r\n psi_inv.append(np.linalg.inv(psi[i]))\r\n \r\n return psi,psi_inv", "def _psi_function(share1, share2):\n return (share1 - share2) * math.log(share1/share2)", "def get_es(self):\n\t\tif not self.data_p.has_key(\"delta\"):\n\t\t\tself.getdelta()\n\t\tself.data_p[\"endsimmer\"] = self.data_p[\"tau_nuc\"]/self.data_p[\"tau_cdyn\"]*self.data_p[\"delta\"]**0.5", "def energy(p,m):\n return math.sqrt(p*p + m*m)", "def doParametersOfInterest(self):\n \n 
self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def self_energy(gf_imp0, gf_imp):\n return 1/gf_imp0 - 1/gf_imp", "def set_psi(self):\n self.psi = float(dihedral(self.C1.getXYZ(), self.GO.getXYZ(), self.CX.getXYZ(), self.CX_minus_1.getXYZ()))", "def equations(self):\n k = 0\n ######################################################################\n # equations for fluid balance\n self.residual[k:k + self.num_nw_fluids * 2] = self.fluid_func()\n k += self.num_nw_fluids * 2\n\n ######################################################################\n # equations for mass flow balance\n self.residual[k:k + 2] = self.mass_flow_func()\n k += 2\n\n ######################################################################\n # equations for energy balance\n self.residual[k] = self.energy_func()\n k += 1\n\n ######################################################################\n # equations for specified heat transfer\n if self.Q.is_set:\n self.residual[k] = (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) - self.Q.val)\n k += 1\n\n ######################################################################\n # equations for specified heat transfer coefficient\n if self.kA.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.kA_func()\n k += 1\n\n ######################################################################\n # equations for specified heat transfer coefficient characteristic\n if self.kA_char.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.kA_char_func()\n k += 1\n\n ######################################################################\n # equations for specified upper terminal temperature difference\n if self.ttd_u.is_set:\n self.residual[k] = self.ttd_u_func()\n k += 1\n\n 
######################################################################\n # equations for specified lower terminal temperature difference\n if self.ttd_l.is_set:\n self.residual[k] = self.ttd_l_func()\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio at hot side\n if self.pr1.is_set:\n self.residual[k] = (\n self.pr1.val * self.inl[0].p.val_SI - self.outl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio at cold side\n if self.pr2.is_set:\n self.residual[k] = (\n self.pr2.val * self.inl[1].p.val_SI - self.outl[1].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified zeta at hot side\n if self.zeta1.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(\n zeta='zeta1', inconn=0, outconn=0)\n k += 1\n\n ######################################################################\n # equations for specified zeta at cold side\n if self.zeta2.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(\n zeta='zeta2', inconn=1, outconn=1)\n k += 1\n\n ######################################################################\n # additional equations\n self.additional_equations(k)", "def estimated_energy(self):\n energy = 0j\n for pauli_string, coef in self._pauli_coef_terms:\n a = self._zeros[pauli_string]\n b = self._ones[pauli_string]\n if a + b:\n energy += coef * (a - b) / (a + b)\n energy = complex(energy)\n if energy.imag == 0:\n energy = energy.real\n energy += self._identity_offset\n return energy", "def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))", "def P(self,numSeg):\n w=self.x/numSeg\n return (w/3)*(self.F(0)+self.sumaImpar(numSeg,w)+self.sumaPar(numSeg,w)+self.F(self.x))", "def OxygenTransmission(T,P,n_wv,wavelength,dr,freq_lim=np.array([lp.c/770e-9,lp.c/768e-9]),sim_nu=np.array([]),spec_file=''):\n # fraction of O2 by number density\n fO2 = (32*0.2320+28.02*0.7547+44.01*0.00046+39.94*0.0128+20.18*0.000012+4.0*0.0000007+83.8*0.000003+131.29*0.00004)*0.2320/32.0\n \n if len(spec_file) == 0:\n spec_file = '/Users/mhayman/Documents/DIAL/O2_HITRAN2012_760_781.txt'\n \n if sim_nu.size==0:\n sim_nu = np.arange(-3e9,3e9,20e6)\n \n# inu0 = np.argmin(np.abs(sim_nu)) # index to center of frequency array\n \n n_o2=fO2*(P/(lp.kB*T)-n_wv) # to convert atm to Pa use *101325\n ext_o2 = rb.ExtinctionFromHITRAN(lp.c/wavelength+sim_nu,T,P,(mO2*1e-3)/lp.N_A,nuLim=freq_lim,freqnorm=True,filename=spec_file).T\n T_o2 = np.exp(-np.cumsum(n_o2[np.newaxis,:]*ext_o2,axis=1)*dr)\n \n return T_o2,sim_nu", "def theorem (self):\n tot_energy = 0.\n tot_energy = 2.0*self.total_kin_energy()-self.tot_pot_energy() # 'tot_energy' is the quantity conserved in the system defined by the Virial theorem.\n\n return(tot_energy)", "def upsilon_phi( self ) :\n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays \n #\n return self.make_selection (\n 'Y&KK' ,\n DaVinci__N3BodyDecays ,\n [ self.upsilons() , self.kaons() ] ,\n ## algorithm properties \n DecayDescriptor = \" Upsilon(4S) -> J/psi(1S) K+ K-\" ,\n Combination12Cut = \"\"\"\n ( AM < 15 * GeV ) &\n ( ACHI2DOCA(1,2) < 16 )\n \"\"\" ,\n CombinationCut = \"\"\"\n ( AM < 15 * GeV ) &\n ( AM23 < 1050 * MeV ) & \n ( ( AM - AM1 - AM23 ) < 2.5 * GeV ) &\n ( ACHI2DOCA(1,3) 
< 16 ) &\n ( ACHI2DOCA(2,3) < 16 )\n \"\"\" ,\n MotherCut = \" chi2vxndf<10\" ,\n )", "def solucaoEspaco(arq,t,n,xf):\n \n parametros = open(arq,'r')\n lista = []\n for linha in parametros:\n valores = linha.split()\n lista.append( valores[1] )\n Ks = float(lista[0]) # Conductibilidade na fase solida\n Kl = float(lista[1]) # Conductibilidade na fase liquida\n Tf = float(lista[2]) # temperatura de mudanca de fase\n Tinf = float(lista[3]) # Temperatura inicial na fase liquida T0\n Tw = float(lista[4]) # Temperatura na fronteira no instante inicial\n L = float(lista[5]) # calor latente\n cs = float(lista[6]) # calor especifico na zona solida\n cl = float(lista[7]) # calor especifico na zona liquida\n cf = float(lista[8]) # calor especifico na zona de mudanca de fase\n p = float(lista[9]) # pho = massa especifica\n parametros.close() \n\n lam = 0.5064 # chute inicial \n T = np.zeros(n+1) # temperaturas\n x = np.linspace(0.0,xf,n+1) # espaco\n\n T[0] = Tw\n for i in range(1,n+1):\n xx = x[i]\n lamb = calculaLambda(funcao,lam,Tinf,Tf,Ks,Kl,L,cs,Tw)\n frente = X_espaco(lamb,Ks,t)\n if (frente > xx):\n T[i] = temperaturaZonaSolida_espaco(Tf,Ks,lamb,xx,t,Tw)\n elif (frente < xx):\n T[i] = temperaturaZonaLiquida_espaco(Tinf,Tf,Ks,Kl,lamb,xx,t)\n else:\n T[i] = Tf\n lam = lamb\n return x,T", "def circuit():\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))", "def value(self): \r\n c = self.nd1() * self.s * math.exp(-self.div * self.t)\r\n c -= self.nd2() * self.x * math.exp(-self.rf * self.t)\r\n \r\n return c", "def newtonian_profile(PSI):\n\n U = dot(MDX, PSI)\n V = - dot(MDY, PSI)\n VGRAD = dot(U,MDX) + dot(V,MDY)\n\n BPFEQNS = zeros((3*vecLen, 3*vecLen), dtype='D')\n # Cxx eqn\n # Cxx\n BPFEQNS[0:vecLen, 0:vecLen] = Nu*MDX - VGRAD \\\n + 2*tsm.c_prod_mat(dot(MDX,U)) - oneOverWi*II\n # Cyy\n BPFEQNS[0:vecLen, vecLen:2*vecLen] = 0\n # Cxy\n BPFEQNS[0:vecLen, 2*vecLen:3*vecLen] = 2*tsm.c_prod_mat(dot(MDY, U))\n # Cyy eqn\n # Cxx\n BPFEQNS[vecLen:2*vecLen, 0:vecLen] = 0\n # Cyy\n BPFEQNS[vecLen:2*vecLen, vecLen:2*vecLen] = Nu*MDX - VGRAD - oneOverWi*II\\\n + 2.*tsm.c_prod_mat(dot(MDY, V))\n # Cxy\n BPFEQNS[vecLen:2*vecLen, 2*vecLen:3*vecLen] = 2.*tsm.c_prod_mat(dot(MDX, V))\n #Cxy eqn\n # Cxx\n BPFEQNS[2*vecLen:3*vecLen, 0:vecLen] = tsm.c_prod_mat(dot(MDX, V))\n # Cyy \n BPFEQNS[2*vecLen:3*vecLen, vecLen:2*vecLen] = tsm.c_prod_mat(dot(MDY, U))\n # Cxy\n BPFEQNS[2*vecLen:3*vecLen, 2*vecLen:3*vecLen] = Nu*MDX - VGRAD - oneOverWi*II \n\n RHS = zeros(3*vecLen, dtype='D')\n RHS[0] = -oneOverWi\n RHS[vecLen] = -oneOverWi\n RHS[2*vecLen:3*vecLen] = 0\n\n soln = linalg.solve(BPFEQNS, RHS)\n\n Cxx = soln[0:vecLen]\n Cyy = soln[vecLen:2*vecLen]\n Cxy = soln[2*vecLen:3*vecLen]\n\n return Cxx, Cyy, Cxy", "def psi(x, a, q):\n T = q.shape[1]\n covmat = calculate_variance(x + tile(a, [T, 1]).T)\n psi1 = covmat[0, 0] - covmat[1, 1]\n psi2 = covmat[0, 1]\n psi3 = x[0, -1]\n psi4 = x[1, -1]\n\n return (psi1, psi2, psi3, psi4)", "def oppervlakte(self):\n x = self.r*self.r*pi\n return x" ]
[ "0.66981435", "0.6571289", "0.64863443", "0.64510655", "0.6450011", "0.63740313", "0.6351212", "0.6321734", "0.6310165", "0.62904805", "0.62640244", "0.623965", "0.61741835", "0.61319333", "0.6116678", "0.60592675", "0.6031023", "0.6012221", "0.600168", "0.59927183", "0.59659547", "0.59367836", "0.5933083", "0.59089637", "0.59055865", "0.5905166", "0.5872765", "0.5870351", "0.58688956", "0.5853626", "0.5798892", "0.5798885", "0.5796058", "0.5795559", "0.5789098", "0.5788066", "0.57779706", "0.5772213", "0.5758445", "0.5753075", "0.57303435", "0.5727305", "0.57242876", "0.5717336", "0.571353", "0.5711464", "0.57061154", "0.5705676", "0.57037145", "0.56854886", "0.56771785", "0.56728184", "0.5667879", "0.5647664", "0.56450486", "0.56430537", "0.5640545", "0.56401235", "0.56380826", "0.5637459", "0.56290144", "0.5628395", "0.5622673", "0.5606126", "0.5600553", "0.55977565", "0.55976444", "0.5592911", "0.55816853", "0.55809164", "0.5579743", "0.55705607", "0.55703646", "0.55600756", "0.55550176", "0.555476", "0.5550957", "0.55498344", "0.554861", "0.55484724", "0.5544155", "0.5544039", "0.554133", "0.5540643", "0.5540064", "0.553996", "0.5538499", "0.5538116", "0.55372226", "0.55256176", "0.55239075", "0.5522623", "0.5515617", "0.550844", "0.55022377", "0.54974174", "0.54959166", "0.54956734", "0.54916775", "0.5491439" ]
0.66243196
1
Route the incoming request based on type (LaunchRequest, IntentRequest, etc.). The JSON body of the request is provided in the event parameter.
def execute_request(self): print( self.LOG_CLASS, '[method: main]', 'MyCityDataModel received:\n', str(self._mcd) ) # TODO: This section should be generalized for all platforms if possible """ Uncomment this if statement and populate with your skill's application ID to prevent someone else from configuring a skill that sends requests to this function. """ # if (mcd.application_id != # "amzn1.echo-sdk-ams.app.[unique-value-here]"): # raise ValueError("Invalid Application ID") if self._mcd.is_new_session: self.on_session_started() if self._mcd.request_type == "LaunchRequest": return self.on_launch() elif self._mcd.request_type == "IntentRequest": return self.on_intent() elif self._mcd.request_type == "SessionEndedRequest": return self.on_session_ended()
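A minimal sketch of the dispatch pattern this query describes, assuming an Alexa-style event payload with 'request' and 'session' fields; the handler names used in the usage comment (on_launch, on_intent, on_session_ended) are placeholders rather than any specific SDK's API:

def route_request(event, handlers):
    """Dispatch an incoming request to the handler registered for its type.

    handlers maps request types ("LaunchRequest", "IntentRequest",
    "SessionEndedRequest") to callables taking (request, session).
    """
    request = event['request']
    session = event['session']

    # New sessions may need to initialize state before the request is handled.
    if session.get('new') and 'SessionStarted' in handlers:
        handlers['SessionStarted']({'requestId': request['requestId']}, session)

    try:
        handler = handlers[request['type']]
    except KeyError:
        raise ValueError("Unsupported request type: %s" % request['type'])
    return handler(request, session)

# Example wiring (handler functions are assumptions, not part of this record):
# response = route_request(event, {
#     "LaunchRequest": on_launch,
#     "IntentRequest": on_intent,
#     "SessionEndedRequest": on_session_ended,
# })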
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_event(event, context):\n print(\"Executing...\")\n router = Router(ROUTE_MAP)\n return router.route_request(event, context)", "def lambda_handler(event, context):\r\n print(\"Incoming request...\")\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n # if (event['session']['application']['applicationId'] !=\r\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\r\n # raise ValueError(\"Invalid Application ID\")\r\n\r\n if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"Incoming request...\")\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if (event['session']['application']['applicationId'] !=\n \"amzn1.ask.skill.2994421a-75ef-4502-9d4a-bf83f20a7ade\"):\n raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\r\n print(\"event.session.application.applicationId=\" +\r\n event['session']['application']['applicationId'])\r\n\r\n\r\n if event['session']['new']:\r\n #print (\"**** Reached\")\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n #print(\"**** Intent coming is : \" + event['request']['type'])\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n if DEBUG:\n print(\"event : {}\".format(json.dumps(event)))\n\n if event['session']['new']:\n if DEBUG:\n print(\"on_session_started requestId=\" + event['request']['requestId'] + \", sessionId=\" + event['session']['sessionId'])\n if event['request']['type'] == \"LaunchRequest\":\n if DEBUG:\n print(\"on_launch requestId=\" + event['request']['requestId'] + \", sessionId=\" + event['session']['sessionId'])\n return get_start_end_response(False)\n elif event['request']['type'] == \"IntentRequest\":\n if DEBUG:\n print(\"on_intent requestId=\" + event['request']['requestId'] + \", sessionId=\" + event['session']['sessionId'])\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n if DEBUG:\n print(\"on_session_ended requestId=\" + event['request']['requestId'] + 
\", sessionId=\" + event['session']['sessionId'])", "def lambda_handler(event, context):\r\n print(\"event.session.application.applicationId=\" +\r\n event['session']['application']['applicationId'])\r\n\r\n\r\n if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, _context):\n print('=====lambda handler started...')\n print(json.dumps(event))\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n # this will trigger if a one shot is used\n if event['request']['type'] == \"IntentRequest\":\n return on_launch(event['request'], event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n if event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n if event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def process_request(self, event, context):\n # if its a new session, run the new session code\n try:\n response = None\n if event['session']['new']:\n self.on_session_started({'requestId': event['request']['requestId']}, event['session'])\n\n # regardless of whether its new, handle the request type\n if event['request']['type'] == \"LaunchRequest\":\n response = self.on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n response = self.on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n response = self.on_session_ended(event['request'], event['session'])\n\n except Exception as exc:\n response = self.on_processing_error(event, context, exc)\n\n return response", "def handle_event(request):\n\n payload = json.loads(request.body)\n if payload['type'] == \"url_verification\":\n return JsonResponse({\"challenge\": payload['challenge']})\n elif payload['type'] == \"event_callback\":\n event = payload['event']\n if event['type'] == \"team_join\":\n slack_post(event['user']['id'], text=\"Welcome to LNL!\", content=views.welcome_message())\n elif event['type'] == \"app_home_opened\":\n load_app_home(event['user'])\n elif event['type'] == \"channel_created\":\n if settings.SLACK_AUTO_JOIN:\n join_channel(event['channel']['id'])\n return HttpResponse()\n return HttpResponse(\"Not implemented\")", "def lambda_handler(event, context):\n logging.info(\"Received event: \" + json.dumps(event, indent=2))\n request_type = event['RequestType']\n if request_type == 'Create':\n attach_policy(event, context)\n elif request_type == 'Delete':\n detach_policy(event, context)\n elif request_type == 'Update':\n update_policy(event, context)", "def lambda_handler(event, context):\n\tprint(\"event.session.application.applicationId=\" +\n\t\t event['session']['application']['applicationId'])\n\n\tif event['session']['new']:\n\t\ton_session_started({'requestId': event['request']['requestId']},\n\t\t\t\t\t\t event['session'])\n\n\tif event['request']['type'] == \"LaunchRequest\":\n\t\treturn on_launch(event['request'], 
event['session'])\n\telif event['request']['type'] == \"IntentRequest\":\n\t\treturn on_intent(event['request'], event['session'])\n\telif event['request']['type'] == \"SessionEndedRequest\":\n\t\treturn on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n \n if event['session']['new']: # if its a new session, go to on_session_started() funtion\n on_session_started({'requestId': event['request']['requestId']}, event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def process_event(event):\n\n # Extract the required parameters from the request body\n id = event.get('id')\n token = event.get('token')\n type = event.get('type')\n\n # If any of the required parameters are absent, reject the request.\n if (not id) or (not token) or (not type):\n app.logger.warn('Request rejected: Required field absent')\n abort(400)\n\n # Message type must be on the allowed types\n legal_types = ['STARTUP', 'DISPENSE', 'REFILL', 'REFILLED', 'EMPTY']\n if type not in legal_types:\n app.logger.warn('Request rejected: Invalid event type')\n abort(400)\n\n # Authenticate the dispenser against our database of tokens.\n if not permitted(event['id'], event['token']):\n app.logger.warn('Request rejected: Invalid authentication token')\n abort(401)\n\n # Dispatch event to appropriate handler\n if type == 'STARTUP':\n handle_startup(event)\n elif type == 'DISPENSE':\n handle_dispense(event)\n elif type == 'REFILL':\n handle_refill_request(event)\n elif type == 'REFILLED':\n handle_refilled(event)\n elif type == 'EMPTY':\n handle_empty(event)\n\n # Send the event to the dispensers Kafka topic\n # TODO Topic for startup, dispense, battery level, refill request\n # producer.send(type, event)", "def request_received(self, event):\n log.debug(\"request received, stream %s\", event.stream_id)", "def lambda_handler(event, context):\n #print(\"event.session.application.applicationId=\" + event['session']['application']['applicationId'])\n\n #if event['session']['new']:\n # on_session_started({'requestId': event['request']['requestId']},event['session'])\n \n intent = None\n try:\n intent = Intent(**event)\n return handle_intent(intent)\n except Exception as ex:\n err = traceback.format_exc()\n print(err)\n return error_handler_func(intent,msg=str(err))", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif 
event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])\n else:\n print (\"********************** Unknown Request\")", "def handler(event, context):\n logger.info(json.dumps(event))\n params = event.get(\"queryStringParameters\")\n\n if params and \"ride-id\" in params:\n response = query_db(params[\"ride-id\"])\n return create_http_response(200, response)\n\n all_results = scan_db()\n time_results = strip_old_records(all_results)\n results = sort_results(time_results, params)\n if not params:\n return create_http_response(200, results)\n origin = params.get(\"origin\", \"\")\n destination = params.get(\"destination\", \"\")\n results = search_routes(results, origin, destination)\n\n return create_http_response(200, results)", "def lambda_handler(event, context):\n print('HANDLING EVENT')\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n logger.info(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"Check that this is being called by our skill\"\"\"\n logger.info(\"Calling app: \"+str(event['session']['application']['applicationId']))\n if (event['session']['application']['applicationId'] !=\n \"amzn1.ask.skill.\"+skill_id):\n logger.error(\"Invalid application ID\")\n raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started(event, {'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event, event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event, event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event, event['request'], event['session'])\n\n # Otherwise deal with it gracefully\n logger.info(\"Unexpected request type:\")\n logger.info(json.dumps(event))\n return build_response({}, build_speechlet_response(\"Leeds Bins\", \"Welcome to Leeds Bins. Now you can find out which waste bins to take out when. 
Try asking: what's my next collection.\", None, False))", "def handler(event, context):\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event):\r\n return 'Hello ' + event['queryParams']['name']", "def hello(event, context):\n\n user_id = event['session']['user']['userId']\n\n if os.getenv('LOGEVENTS', 'false') == \"true\":\n s3.put_object(\n ACL='public-read',\n Bucket=bucket,\n Key=\"logging/{}.json\".format(datetime.datetime.now().strftime(\"%H:%M:%S_on_%d_%B_%Y\")),\n Body=json.dumps(event),\n ContentType='application/json'\n )\n\n for k, v in event.items():\n print(k, v)\n\n if event['session']['new'] == True:\n on_session_started(event['request'], event['session'])\n\n response = None\n request_type = event['request']['type']\n\n if request_type == \"LaunchRequest\":\n response = on_launch(event['request'], event['session'])\n elif request_type == \"IntentRequest\":\n intent_name = event['request']['intent']['name']\n if intent_name == \"AMAZON.HelpIntent\":\n response = on_help(event['request'], event['session'])\n elif intent_name == \"AMAZON.CancelIntent\":\n response = on_session_ended(event['request'], event['session'])\n elif intent_name == \"AMAZON.StopIntent\":\n response = on_session_ended(event['request'], event['session'])\n else:\n response = on_intent(event['request'], event['session'])\n elif request_type == \"SessionEndedRequest\":\n response = on_session_ended(event['request'], event['session'])\n\n return response", "def handler(event, context):\n if event['Records'][0]['Sns']['Message'] is None:\n _print_info('Unrecognized event, function will not be executed. Enable debug to log the actual event.')\n _print_debug('event: {}'.format(event))\n return\n\n message = event['Records'][0]['Sns']['Message']\n _print_debug('message received: {}'.format(message))\n\n event = json.loads(message)\n _print_info('event: {}'.format(json.dumps(event)))\n\n if event[ACTION] in ALLOWED_ACTIONS:\n\n _print_info('Requested action: {}'.format(event[ACTION]))\n\n _print_info('Initializing.')\n _init_vars_()\n\n # create a hive cursor which can be passed around and then closed when done.\n cursor = _create_hive_cursor()\n\n if event[ACTION] == FULL_SYNC:\n _sync_all(cursor)\n if event[ACTION] == DELTA_SYNC:\n if event[USER] and event[NAMESPACE]:\n _sync_delta(cursor, event[USER], event[NAMESPACE])\n else:\n _print_error(\n 'Invalid request. Expecting both: a valid \\'{}\\' and a valid \\'{}\\''.format(\n USER, NAMESPACE))\n\n # close the hive cursor when done\n _close_hive_cursor(cursor)\n else:\n _print_error(\n 'Unknown action. 
Expecting one of: \\'{}\\', \\'{}\\''.format(FULL_SYNC,\n DELTA_SYNC))", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n #if (event['session']['application']['applicationId'] != \"<APPLICATION_ID>\"):\n # raise ValueError(\"Invalid Application ID\")\n\n\n if event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n #Don't let anyone else's skill send requests to this lambda\n if (event['session']['application']['applicationId'] !=\n \"amzn1.echo-sdk-ams.app.[application_id_goes_here]\"):\n raise ValueError(\"Invalid Application ID\")\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n logger.info('got event{}'.format(event))\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print 'Received event: ' + json.dumps(event, indent=2)\n print \"Context log stream: \"+ context.log_stream_name\n\n try:\n filename = get_latest_agent_filename()\n download_agent_if_missing(filename)\n prepare_agent_input_data(event, context)\n run_agent(filename)\n\n except URLError as ex:\n print 'Error: ', ex", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n league = brasileirao.get()\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'], league)\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if 
event['request']['type'] == \"LaunchRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n # Have to add 'attributes' entry into the event session since it could not find\n # it from a physical device even though it worked in the online testing\n if 'attributes' not in event['session']:\n event['session']['attributes'] = {}\n\n global session_attributes\n session_attributes = event['session']['attributes']\n\n if event['session']['new']:\n on_session_start()\n\n return request_to_handler[event['request']['type']](event)", "def on_intent(request, session):\n\n intent_name = request['intent']['name']\n \n # process the intents\n if intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n \n elif intent_name == \"AMAZON.StopIntent\":\n return get_stop_response()\n \n elif intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response()\n \n elif intent_name == \"AMAZON.FallbackIntent\":\n return get_fallback_response()\n \n elif intent_name == \"recognizeDates\":\n slots = request['intent']['slots']\n date_start_slot = slots.get('dateStart',{'value':'NA'}).get('value','NA')\n date_end_slot = slots.get('dateEnd',{'value':'NA'}).get('value','NA')\n\n return get_intent_response(date_start_slot,date_end_slot)\n \n elif intent_name == \"PollHprofs\":\n slots = request['intent'].get('slots','')\n print(slots)\n speechOutput = \"Under development\"\n return response(speech_response(speechOutput, True))\n\n elif intent_name == \"SpinVMs\":\n slots = request['intent'].get('slots','')\n print(slots)\n speechOutput = \"Under development\"\n return response(speech_response(speechOutput, True))\n\n else:\n print(\"For invalid Intents reply with help\")\n return get_help_response()", "def lambda_handler(event, context):\n\n operations = {\n 'POST': main,\n }\n\n if event.get('httpMethod', False):\n operation = event['httpMethod']\n else:\n operation = \"not available\"\n\n payload = base64.b64decode(event['body'])\n try:\n payload = json.loads(payload)\n except TypeError:\n pass\n\n if operation in operations:\n return respond(None, operations[operation](payload))\n else:\n return respond(ValueError(f'Unsupported method {operation}'))", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" + event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']}, event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\r\n print(\"event.session.application.applicationId=\" +\r\n 
event['session']['application']['applicationId'])\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n # if (event['session']['application']['applicationId'] !=\r\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\r\n # raise ValueError(\"Invalid Application ID\")\r\n\r\n if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\r\n print(\"event.session.application.applicationId=\" +\r\n event['session']['application']['applicationId'])\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n # if (event['session']['application']['applicationId'] !=\r\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\r\n # raise ValueError(\"Invalid Application ID\")\r\n\r\n if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], 
event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # 
\"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n 
event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'], state)\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'], state)\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "async def _handle_request(self, request: web.Request) -> web.Response:\n event = await request.json()\n # This handler will be called on the server thread. 
Call the external\n # handler on the app thread.\n self._main_loop.call_soon_threadsafe(self.handle_event, event)\n return web.Response(text=\"OK\")", "def lambda_handler(event, context):\r\n if 'session' in event:\r\n print(\"event.session.application.applicationId=\" +\r\n event['session']['application']['applicationId'])\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n if ('session' in event and (event['session']['application']['applicationId'] !=\r\n \"amzn1.ask.skill.57119d91-fb3c-487f-be53-4e7fac12fb83\")):\r\n raise ValueError(\"Invalid Application ID\")\r\n\r\n \"\"\"if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\"\"\"\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])\r\n elif event['request']['type'] == 'UPDATE':\r\n return saveCoffeeMachineStatus(event['request'])\r\n elif event['request']['type'] == \"GLASS\":\r\n return glassStatus(event['request'])\r\n elif event['request']['type'] == \"WATER\":\r\n return waterStatus(event['request'])\r\n elif event['request']['type'] == \"COFFEE\":\r\n return coffeeStatus(event['request'])\r\n elif event['request']['type'] == \"ON_OFF\":\r\n return on_off_status(event['request'])\r\n elif event['request']['type'] == \"ONLINE\":\r\n return online_status_f(event['request'])\r\n elif event['request']['type'] == 'BUSY':\r\n return busyStatus(event['request'])", "def on_intent(event, intent_request, session):\n\n logger.info(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent == \"NextBins\":\n return get_next_collection(event, session)\n\n return get_generic_welcome_message()", "def launch_request_handler(handler_input: HandlerInput) -> Response:\n day = events.get_date()\n text = events.for_day(day)\n log.info(f\"launch: events for {day} = {text}\")\n return (\n handler_input.response_builder.speak(text)\n .set_card(SimpleCard(f\"Hillbrook events for {day.strftime('%A')}:\\n{text}\"))\n .set_should_end_session(True)\n .response\n )", "def lambda_handler(event, context):\n\n retval = {}\n\n # retrieve event information (i.e. 
station name and direction)\n station = get_origin_name(event)\n destination = get_destination(event)\n query_direction = get_direction(event).title()\n\n # finds abbreviation for origin and dest station\n query_orig = get_station_abbr(station)\n if destination:\n query_dest = get_station_abbr(destination)\n return dest_route(query_orig, query_dest, station, destination)\n\n else:\n return direction_route(query_orig, query_direction, station)", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if (event['session']['application']['applicationId'] !=\n \"amzn1.ask.skill.xxxx\"):\n #Set Alexa Skill ID\n raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n # By default, treat the user request as coming from the America/New_York time zone.\n \n os.environ['TZ'] = 'America/New_York'\n time.tzset()\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\n intent_name = event['currentIntent']['name']\n \n return dispatch(event)", "def process(self, request, **kwargs):\n\n if len(request):\n\n REST_header = \"\"\n REST_verb = \"\"\n str_path = \"\"\n json_payload = \"\"\n\n self.dp.qprint(\"Listener ID - %s: process() - handling request\" % (self.worker_id))\n\n now = datetime.datetime.today()\n str_timeStamp = now.strftime('%Y-%m-%d %H:%M:%S.%f')\n self.dp.qprint(Colors.YELLOW)\n self.dp.qprint(\"***********************************************\")\n self.dp.qprint(\"***********************************************\")\n self.dp.qprint(\"%s incoming data stream\" % (str_timeStamp) )\n self.dp.qprint(\"***********************************************\")\n self.dp.qprint(\"len = %d\" % len(request))\n self.dp.qprint(\"***********************************************\")\n self.dp.qprint(Colors.CYAN + \"%s\\n\" % (request.decode()) + Colors.YELLOW)\n self.dp.qprint(\"***********************************************\" + Colors.NO_COLOUR)\n l_raw = request.decode().split('\\n')\n FORMtype = l_raw[0].split('/')[0]\n\n self.dp.qprint('Request = ...')\n self.dp.qprint(l_raw)\n REST_header = l_raw[0]\n REST_verb = REST_header.split()[0]\n str_path = REST_header.split()[1]\n json_payload = l_raw[-1]\n\n # remove trailing '/' if any on path\n if str_path[-1] == '/': str_path = str_path[0:-1]\n\n d_ret = {}\n d_ret['status'] = False\n d_ret['RESTheader'] = REST_header\n d_ret['RESTverb'] = REST_verb\n d_ret['action'] = \"\"\n d_ret['path'] = str_path\n d_ret['receivedByServer'] = l_raw\n\n if REST_verb == 'GET':\n d_ret['GET'] = self.DB_get(path = str_path)\n d_ret['status'] = True\n\n self.dp.qprint('json_payload = %s' % json_payload)\n d_ret['client_json_payload'] = json_payload\n d_ret['client_json_len'] = len(json_payload)\n if len(json_payload):\n d_payload = json.loads(json_payload)\n d_request = d_payload['payload']\n payload_verb = 
d_request['action']\n if 'meta' in d_request.keys():\n d_meta = d_request['meta']\n d_ret['payloadsize']= len(json_payload)\n\n if payload_verb == 'quit':\n self.dp.qprint('Shutting down server...')\n d_ret['status'] = True\n\n if payload_verb == 'run' and REST_verb == 'PUT':\n d_ret['action'] = payload_verb\n self.processPUT( request = d_request)\n d_ret['status'] = True\n\n if REST_verb == 'POST':\n self.processPOST( request = d_request,\n ret = d_ret)\n return d_ret\n else:\n return False", "def pre_runroute_callable(self, route, request):\n\n #request.logevent(EInfo(\"pre_runroute_callable Request URL: {0} from {1}.\".format(request.get_full_path(), request.get_remote_addr())))\n # ATTN: test, let's trigger a signal\n if (False):\n id = 'signal.site.pre_runroute'\n message = {'route':route}\n source = None\n flag_collectresults = True\n signalresults = self.comp('signalmanager').broadcast(id, message, request, source, flag_collectresults)\n return None", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n event['session']['attributes'] = {\"convoState\" : 1}\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def on_launch(event_request, session):\n print(\"=====on_launch requestId: \" + event_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n return play_new_game(False)", "def serve(self, event: Dict) -> Union[MSG_RETURN, None]:\n raw_msg = event['content']['body']\n for k in self.routes.keys():\n m = re.search(k, raw_msg, re.IGNORECASE)\n\n if m:\n\n matches = m.groupdict()\n route = matches.get('route')\n msg = matches.get('msg')\n\n func = self.routes.get(k)\n\n if func:\n\n logger.info(\n (\n 'matched route %s '\n 'with msg %s '\n 'from %s '\n 'and triggered \"%s\"'\n ),\n route, msg, raw_msg, func.__name__\n )\n\n return func(route, msg, event)\n\n return None", "def handle(request_body, project):\n handler_strategy = handlers_collection.get_strategy(request_body)\n\n if handler_strategy is not None:\n return handler_strategy.handle(request_body, project)\n\n raise NotSupportedEventTypeBadRequest(request_body.get('event'))", "def lambda_handler(event: APIGatewayProxyEvent, context: LambdaContext) -> Dict[str, Any]:\n\n return app.resolve(event, context)", "def handle_event(self, event):\n return HttpResponse(\n content=f'Unhandled webhook received: {event[\"type\"]}',\n status=200)", "def hello_world(\n event: Dict[str, Any],\n context,\n):\n body_str = event.get(\"body\", \"{}\")\n body_str = body_str if body_str else \"{}\"\n body_obj = json.loads(body_str)\n wiki_search_term = body_obj.get(\"searchTerm\", \"\")\n if not body_obj or not wiki_search_term:\n # 
https://docs.aws.amazon.com/apigateway/latest/developerguide/handle-errors-in-lambda-integration.html\n response = {\n \"statusCode\": 400,\n \"headers\": {\"Content-Type\": \"application/json\"},\n \"body\": json.dumps({\"message\": \"Wikipedia search term was not provided\"}),\n }\n else:\n summary = wikipedia.summary(wiki_search_term)\n response = {\n \"statusCode\": 200,\n \"headers\": {\"Content-Type\": \"application/json\"},\n \"body\": json.dumps(summary),\n }\n # https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format\n return response", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n print(intent)\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"WhoIs\":\n intent_search = intent['slots']['ASN']['value']\n return whois(intent_search)\n elif intent_name == \"WherePeer\":\n intent_search = intent['slots']['company']['value']\n return wherePeer(intent_search)\n elif intent_name == \"WhoPeers\":\n intent_search = intent['slots']['IX']['value']\n return whoPeers(intent_search)\n elif intent_name == \"WhosAt\":\n intent_search = intent['slots']['facility']['value']\n return whosAt(intent_search)\n elif intent_name == \"RouteServers\":\n return routeServers()\n else:\n raise ValueError(\"Invalid intent\")", "def route( request, c ):", "def lambda_handler(event, context):\n\n event_body = json.loads(event['body'])\n print(\"EVENT:\")\n print(event_body)\n\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n recs = flow(event_body, textract, cache = True)\n rval = {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\" : \"hello world\",\n \"textract\" : recs\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }\n\n return rval", "def on_launch(launch_request, session):\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def handler(context, event):\n\n if _ensure_str(event.trigger.kind) != 'http' or _invoked_by_cron(event):\n body = event.body.decode('utf-8')\n context.logger.info('Received event body: {0}'.format(body))\n\n # serialized record\n serialized_record = json.dumps({\n 'body': body,\n 'headers': {\n _ensure_str(header): _ensure_str(value)\n for header, value in event.headers.items()\n },\n 'timestamp': datetime.datetime.utcnow().isoformat(),\n })\n\n # store in log file\n with open(events_log_file_path, 'a') as events_log_file:\n events_log_file.write(serialized_record + ', ')\n\n else:\n\n # read the log file\n try:\n with open(events_log_file_path, 'r') as events_log_file:\n events_log_file_contents = events_log_file.read()\n except IOError:\n events_log_file_contents = ''\n\n # make this valid JSON by removing last two chars (, ) and enclosing in [ ]\n encoded_event_log = '[' + events_log_file_contents[:-2] + ']'\n\n context.logger.info('Returning events: {0}'.format(encoded_event_log))\n\n # return json.loads(encoded_event_log)\n return encoded_event_log", "def on_launch(launch_request, 
session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response(session)", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def lambda_handler(event, context):\n\n 
\"\"\"\n This statement prevents someone else from configuring a skill that sends \n requests to this function.\n \"\"\"\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n print(\"on_launch requestId=\" + launch_request['requestId'] + \", sessionId=\" + session['sessionId'])\n \n # Dispatch to your skill's launch\n return get_welcome_response(session)", "def on_launch(event, launch_request, session):\n\n logger.info(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n return get_generic_welcome_message()", "async def rest_handler(request):\n # verify the request\n valid, reason = await verify_rest_request(request)\n if not valid:\n return generate_error(reason, 400)\n json = await request.json()\n # get the parameters\n cmd = json['cmd']\n params = json['params']\n # pass off to the correct target handler\n if cmd == 'find':\n response = await _find_handler(request, params)\n elif cmd == 'stats':\n response = await _stats_handler(request, params)\n elif cmd == 'download':\n response = await _download_handler(request, params)\n elif cmd == 'upload':\n response = await _upload_handler(request, params)\n elif cmd == 'provision':\n response = await _provision_handler(request, params)\n # return the response we get back fgrom the handler\n return response", "def on_launch(launch_request, session):\r\n\r\n #print(\"****on_launch requestId=\" + launch_request['requestId'] +\r\n # \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def lambda_handler(event, context):\n # define initial status code and headers\n statusCode = 400\n try:\n # get the body params\n if type(event) == dict:\n event_body = event.get('body', event)\n else:\n event_body = json.loads(event).get('body', {})\n # generate and store the reservation response result from reservation handler function\n reservation_handler = ReservationHandler(EventBodyData=event_body)\n result = reservation_handler.sabre_reservation_handler()\n # define status code, headers and response\n if type(result) == dict:\n statusCode = result.get(\"statusCode\", statusCode)\n response = result.get(\"body\", \"\")\n else:\n response = result\n except Exception as E:\n response = str(E)\n\n # return the response\n return {\n 'statusCode': statusCode,\n 'body': response\n }", "def on_launch(launch_request, session):\r\n\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n\r\n 
print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def zari_webhook(request):\n request_json = request.get_json()\n print(request_json)\n if request_json[\"queryResult\"] != None:\n text = request_json[\"queryResult\"][\"queryText\"]\n intent = request_json[\"queryResult\"][\"intent\"][\"displayName\"]\n print(intent)\n print(intent == \"VerZapatos\")\n parameters = request_json[\"queryResult\"][\"parameters\"]\n \n if intent == \"VerZapatos\":\n return compose_response(ver_zapatos(parameters))\n \n return compose_response(f\"Hola mundo sin intent {text}\")\n\n #if request.args and 'message' in request.args:\n # return request.args.get('message')\n #elif request_json and 'message' in request_json:\n # return request_json['message']\n #else:\n # return f'Hello World!'", "def post_runroute_callable(self, request):\n return None", "def on_intent(event):\n\n intent = event[\"request\"][\"intent\"][\"name\"]\n\n if intent in (\"AMAZON.CancelIntent\", \"AMAZON.StopIntent\", \"AMAZON.NoIntent\"):\n return handle_session_end_request()\n\n if intent == \"AMAZON.YesIntent\":\n if \"attributes\" in event[\"session\"] and \"previousIntent\" in \\\n event[\"session\"][\"attributes\"]:\n\n if event[\"session\"][\"attributes\"][\"previousIntent\"] == \"AMAZON.HelpIntent\":\n return main_handler(event)\n\n speech_output = event[\"session\"][\"attributes\"][\"nextStations\"]\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n speech_output = \"Sorry, something went wrong.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n if intent == \"isBikesAvailable\":\n return main_handler(event)\n\n if intent == \"AMAZON.HelpIntent\":\n return handle_help_intent()\n\n speech_output = \"Sorry, I don\\'t know that.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)", "def RequestHandler_start(self):\n # check for X-Trace header in HTTP request\n ctx, ev = oboe.Context.start_trace('tornado', xtr=self.request.headers.get(\"X-Trace\"), avw=self.request.headers.get(\"X-TV-Meta\"))\n\n if hasattr(self, '__class__') and hasattr(self.__class__, '__name__'):\n ev.add_info(\"Controller\", self.__class__.__name__)\n ev.add_info(\"Action\", self.request.method.lower())\n ev.add_info(\"URL\", self.request.uri)\n ev.add_info(\"Method\", self.request.method)\n ev.add_info(\"HTTP-Host\", self.request.host)\n ctx.report(ev)\n\n # create & store finish event for reporting later\n self.request._oboe_ctx = ctx\n self.request._oboe_finish_ev = ctx.create_event('exit', 'tornado') # adds edge from exit event -> enter event's md\n\n # report the exit event ID in the response header\n self.set_header(\"X-Trace\", self.request._oboe_finish_ev.id())", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"NextLaunchIntent\":\n return perform_next_launch_intent(intent, session)\n elif intent_name == 
\"MissionDetailIntent\":\n return get_color_from_session(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_launch(launch_request, session):\n\n session['attributes'] = {}\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()" ]
[ "0.7192836", "0.65895516", "0.64038146", "0.63481766", "0.6275576", "0.625636", "0.6239826", "0.6221745", "0.611795", "0.60871387", "0.6013772", "0.5998908", "0.5991289", "0.59416825", "0.59129614", "0.5898017", "0.5889853", "0.5842317", "0.58409756", "0.58315444", "0.5790977", "0.5786596", "0.57813686", "0.5753635", "0.5740456", "0.5739421", "0.5738726", "0.57090145", "0.5692903", "0.5691276", "0.56896496", "0.56750286", "0.5666347", "0.5666347", "0.5658505", "0.5658505", "0.5658505", "0.5658505", "0.5658505", "0.5658505", "0.5658505", "0.5658505", "0.5658505", "0.5658505", "0.5658505", "0.5644855", "0.56425005", "0.56415343", "0.55875915", "0.55706424", "0.5569739", "0.55527073", "0.55525005", "0.55497634", "0.55364305", "0.5526163", "0.5515276", "0.54969126", "0.5490685", "0.5482825", "0.548048", "0.5465374", "0.5463696", "0.54629004", "0.5439287", "0.5416989", "0.5416989", "0.5415899", "0.54155695", "0.5410285", "0.5410285", "0.5410285", "0.5410285", "0.5410285", "0.5410285", "0.5410285", "0.5410285", "0.5410285", "0.5410285", "0.5410285", "0.5410285", "0.5410285", "0.5410285", "0.5410285", "0.5407127", "0.5403324", "0.5396827", "0.53937054", "0.53879625", "0.5384056", "0.5367327", "0.53670156", "0.53670156", "0.53670156", "0.53506106", "0.53503805", "0.53343284", "0.53204256", "0.530624", "0.53035706", "0.5303165" ]
0.0
-1
Called when the session starts.
def on_session_started(self): print( MyCityController.LOG_CLASS, '[method: on_session_started]', '[requestId: ' + str(self._mcd.request_id) + ']', '[sessionId: ' + str(self._mcd.session_id) + ']' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_start(self, session):\n pass", "def on_session_started():\n #print(\"on_session_started\")", "def on_session_started():\n #print(\"on_session_started\")", "def on_session_started(session_started_request, session):", "def init_session(self):\n pass", "def init_session(self):\n pass", "def on_session_started(session_started_request, session):\r\n # Add additional code here as needed\r\n pass", "def on_session_started(session_started_request, session):\n # Add additional code here as needed\n pass", "def start_session(self):\r\n ee.Initialize()", "def on_start(self):\n self.login()", "def on_start(self):\n self.login()", "def on_start(self):\n self.login()", "def on_start(self):\n self.login()", "def on_startup(self) -> None:\n ...", "def on_session_started(session_started_request, session):\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n # any initialization logic goes here", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def __post_init__(self):\n self._session = Session()\n self._post_hooks()", "def on_start(self):\n self.init()", "async def start_session(self):\n\t\t...", "def on_start(self, session):\n self.put_prompt(session)", "def started(self):", "def on_start(self):", "def on_start(self):", "def post_start(self):", "def pre_start(self) -> None:\n pass", "def on_start(self, ctx):\n pass", "def on_session_start():\n session_attributes[NUMBER_LIST_KEY] = [LOWER + i for i in range(UPPER - LOWER + 1)]\n session_attributes[LAST_QUESTION_KEY] = NO_QUESTION\n session_attributes[LAST_EXTENSION_KEY] = ''\n session_attributes[NUM_QUESTIONS_KEY] = 0", "def Start(self) :\n\t\t...", "def on_start(self):\n # self.login()\n\n # self.createfiles()", "def _load_session(self):\n if not self.session.container_id:\n self.start()\n self._check_fifo()\n self._update_container()", "def on_session_started(session_started_request, session):\n\n\tprint(\"on_session_started requestId=\" + session_started_request['requestId']\n\t\t + \", sessionId=\" + session['sessionId'])", "def startup(self):\n pass", "def on_session_started(session_started_request, session):\n print(\"=====on_session_started requestId: \" +\n session_started_request['requestId'] + \", sessionId=\" +\n session['sessionId'])", "def startSession(self):\n self.storage.insert(self.__json__())", "def on_session_started(self, f):\n self._on_session_started_callback = f\n\n return f", "def session_start(self, ignored):\n self.get_online_users()", "def on_server_start(self):\n raise NotImplementedError", "def on_session_started(session_started_request, session):\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" +\n session_started_request['requestId'] + \", sessionId=\" +\n session['sessionId'])", "def on_before_render(self, request):\n \n cookie_name = request.get_action_parameter(\"session_cookie_name\",\n \"gyro-session-uuid\")\n uuid = request.get_cookie(cookie_name)\n \n session = None\n \n if uuid:\n 
session = self.storage.get_session(uuid)\n else:\n uuid = generate_uuid()\n \n request.session_uuid = uuid\n \n if session is not None:\n request.session = session\n else:\n def set_session(r):\n if not r:\n r = {}\n \n request.session = r\n \n return plugin.run_hook(\"on_new_session\", request).add_callback(\n set_session)", "def start(self):\n ...", "def on_start(self):\n self.state = STARTED", "def on_session_started(session_started_request, session):\n \n #session.attributes['result_number'] = 1\n session['attributes'] = {}\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, 
session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\r\n\r\n print(\"on_session_started requestId=\" + session_started_request['requestId']\r\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\r\n\r\n print(\"on_session_started requestId=\" + session_started_request['requestId']\r\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\r\n\r\n print(\"on_session_started requestId=\" + session_started_request['requestId']\r\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\r\n\r\n print(\"on_session_started requestId=\" + session_started_request['requestId']\r\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\r\n\r\n print(\"on_session_started requestId=\" + session_started_request['requestId']\r\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\r\n\r\n print(\"on_session_started requestId=\" + session_started_request['requestId']\r\n + \", sessionId=\" + session['sessionId'])", "def on_session_started(session_started_request, session):\n\n print(\"on_session_started requestId=\" + session_started_request['requestId'] +\n \", sessionId=\" + session['sessionId'])", "def onInit(self):\n pass", "def start(self):\r\n pass", "def startup(self) -> None:", "def initialize(self):\n self.login()", "def on_starting(self):\n\n self.set_capture_events_from_config()", "def on_pre_enter(self):\n self.setup()\n self.start()", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def __init__(self):\r\n # create a session id\r\n self.session = ViSession()", "def on_start(self):\n self.write_log(\"策略启动\")", "def on_start(self):\n self.write_log(\"策略启动\")", "def on_start(self):\n self.write_log(\"策略启动\")", "def on_start(self):\n self.write_log(\"策略启动\")", "def _start(self):\n pass", "def on_start(self):\n self.logger.debug(\"Starting...\")\n pass", "def start (self):\n pass", "def start (self):\n pass" ]
[ "0.87699443", "0.83996505", "0.83996505", "0.78695554", "0.7841736", "0.7841736", "0.7705618", "0.7561237", "0.75006485", "0.7434301", "0.7434301", "0.7434301", "0.7434301", "0.7385277", "0.7357132", "0.7287912", "0.7287912", "0.7287912", "0.7287912", "0.7287912", "0.7287912", "0.7287912", "0.7287912", "0.72207725", "0.7205935", "0.7090715", "0.70482934", "0.6992876", "0.6976886", "0.6976886", "0.6951633", "0.69482803", "0.6880389", "0.68780714", "0.6871586", "0.6860496", "0.6831591", "0.68310004", "0.68262106", "0.67842555", "0.67770547", "0.67703086", "0.67625755", "0.67370474", "0.67273813", "0.67273813", "0.67212415", "0.67188126", "0.67159694", "0.66948736", "0.6683563", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.6674552", "0.66671497", "0.66671497", "0.66671497", "0.66671497", "0.66671497", "0.66671497", "0.6664408", "0.66630036", "0.6632837", "0.6628441", "0.6609735", "0.65922284", "0.6573688", "0.65649694", "0.65649694", "0.65649694", "0.65649694", "0.65649694", "0.65649694", "0.65649694", "0.65649694", "0.65530443", "0.6552768", "0.6552768", "0.6552768", "0.6552768", "0.65404886", "0.6521362", "0.6519816", "0.6519816" ]
0.71602905
25
Called when the user launches the skill without specifying what they want.
def on_launch(self): print( MyCityController.LOG_CLASS, '[method: on_launch]', '[requestId: ' + str(self._mcd.request_id) + ']', '[sessionId: ' + str(self._mcd.session_id) + ']' ) # Dispatch to your skill's launch return self.get_welcome_response()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_launch(launch_request, session):\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n # Dispatch to your skill's launch message\n return get_welcome_response()", "def on_launch(launch_request, session):\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_help_response()", "def on_launch(launch_request, session):\r\n # Dispatch to your skill's launch message\r\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n\r\n #print(\"****on_launch requestId=\" + launch_request['requestId'] +\r\n # \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return skill_information()", "def on_launch(launch_request, session):\n\n session['attributes'] = {}\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n 
print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response(session)", "def on_launch(launch_request, session):\n print(\"on_launch requestId=\" + launch_request['requestId'] + \", sessionId=\" + session['sessionId'])\n \n # Dispatch to your skill's launch\n return get_welcome_response(session)", "def handler(event, context):\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': 
event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def skill(ctx: Context, public_id: PublicId):\n _eject_item(ctx, \"skill\", public_id)", "def on_launch(launch_request, session):\n\n\tprint(\"on_launch requestId=\" + launch_request['requestId'] +\n\t\t \", sessionId=\" + session['sessionId'])\n\t# Get's the help section\n\treturn get_welcome_response()", "def new_skill_interaction(self, skill):\n self.skill_interact[skill] = True", "def launch_intent():\n welcome_message = \"On which cloud would you like to launch Galaxy?\"\n return question(welcome_message).reprompt(help_text)", "def pre_launch(mission):\n started_since = mission.ut() - mission.current_step[\"start_ut\"]\n if started_since > 5:\n mission.next()\n elif mission.current_step[\"first_call\"]:\n vessel = mission.conn.space_center.active_vessel\n ap = vessel.auto_pilot\n\n ap.engage()\n ap.target_pitch_and_heading(90, 90)\n vessel.control.throttle = 1\n vessel.control.sas = False\n vessel.control.rcs = mission.parameters.get('use_rcs', False)", "def hook (self, *args, **kwargs):\n self.launch([\"--fastexit\"])", "def install_default_skills(speak=True):\n if exists(MSM_BIN):\n p = subprocess.Popen(MSM_BIN + \" default\", stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE, shell=True)\n (output, err) = p.communicate()\n res = p.returncode\n if res == 0 and speak:\n # ws.emit(Message(\"speak\", {\n # 'utterance': mycroft.dialog.get(\"skills updated\")}))\n pass\n elif not connected():\n LOG.error('msm failed, network connection is not available')\n ws.emit(Message(\"speak\", {\n 'utterance': mycroft.dialog.get(\"no network connection\")}))\n elif res != 0:\n LOG.error('msm failed with error {}: {}'.format(res, output))\n ws.emit(Message(\"speak\", {\n 'utterance': mycroft.dialog.get(\n \"sorry I couldn't install default skills\")}))\n\n else:\n LOG.error(\"Unable to invoke Mycroft Skill Manager: \" + MSM_BIN)", "def on_launch(launch_request, session, state):\n\n print(\"on_launch \"+str(launch_request)+\" \"+str(session)+\" requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n # If new or corrupted user, prompt to set up first\n userId = session[\"user\"][\"userId\"]\n query_user = get_info(userId)\n if len(query_user) == 0 or \\\n (len(query_user) > 0 and len(query_user[0].keys()) != NUM_DB_COLS):\n if len(query_user) > 0 and len(query_user[0].keys()) != NUM_DB_COLS:\n delete_info(userId)\n \n return new_user_intro(session, state)\n \n # For existing users, greet by name, talk about main focus, give commands to check in\n return existing_user_intro(session, state)", "def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1", "def lambda_handler(event, context):\n\n \"\"\"\n This statement prevents someone else from configuring a skill that sends \n requests to this function.\n \"\"\"\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return 
on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def setup_class(cls):\n super().setup_class()\n cls.add_item(\"skill\", str(cls.GENERIC_SELLER.public_id), local=False)", "def on_launch(event_request, session):\n print(\"=====on_launch requestId: \" + event_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n return play_new_game(False)", "def __init__(__self__, *,\n alexa_skill_id: pulumi.Input[str],\n is_enabled: pulumi.Input[bool]):\n pulumi.set(__self__, \"alexa_skill_id\", alexa_skill_id)\n pulumi.set(__self__, \"is_enabled\", is_enabled)", "def on_activate(self) -> None:", "async def on_experience(self, payload):\n\n self.keep = False\n self.stop()", "def required_skills(self, required_skills):\n\n self._required_skills = required_skills", "def call_launch_app_callback(self, app_name, **kwargs):\n raise NotImplementedError", "def on_start(self, session):\n self.put_prompt(session)", "def on_launch():\n return get_welcome_message()", "def test_dispatch_launch(self):\n @self.skill.launch\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n self.skill.response.sessionAttributes['run'] = True\n self.skill.request.request.type = 'LaunchRequest'\n self.skill.dispatch()\n self.assertTrue(self.skill.response.sessionAttributes['run'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n #if (event['session']['application']['applicationId'] != \"<APPLICATION_ID>\"):\n # raise ValueError(\"Invalid Application ID\")\n\n\n if event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])", "def on_launch(request):\n\n return get_launch_response()", "async def skill(self, ctx, *, skill: str):\n\n try:\n skill = self.get_entry('Skill', skill.lower())\n except RuntimeError as e:\n return await ctx.send(e)\n\n name = skill['Name']\n\n embed = discord.Embed(title=name)\n embed.set_thumbnail(url='attachment://skill.png')\n embed.add_field(name='Learned', value=skill['Class/Rank'], inline=False)\n embed.add_field(name='Effect', value=skill['Effect'])\n\n await ctx.send(file=discord.File(f'xenox/skills/{name}.png', 'skill.png'), embed=embed)", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n\n if intent_name not in skillmap:\n intent_name = \"NullSkill\"\n\n if intent_name in skillmap:\n try:\n return skillmap[intent_name].execute(intent, session)\n except Exception as e:\n traceback.print_exc()\n return SkillBase().respond(\"Sorry I missed that\", \"Error\", str(e))\n else:\n raise ValueError(\"Invalid intent\")", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])", "def _onPremade(self, event):\n self.openPremade()", "def test_process_invalid1(self):\n self.skill.logic = {}\n self.skill.valid.app_id = '12345'\n @self.skill.launch\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n pass\n 
self.skill.logic['LaunchRequest']()\n self.assertFalse(self.skill.process(data.SAMPLE_LAUNCH_REQUEST))", "def onAudioIntent(self, *args, intentName):\n\n print(intentName, *args)\n if intentName == 'name' and len(args) > 0:\n self.name = args[0]\n self.nameLock.release()\n elif intentName == 'origin' and len(args) > 0:\n self.origin = args[0]\n self.originLock.release()\n elif intentName == 'age' and len(args) > 0:\n for arg in args:\n if arg.isdigit():\n self.age = arg\n self.ageLock.release()\n elif intentName == 'exclusion' and len(args) > 0:\n self.exclusion = args[0]\n self.exclusionLock.release()\n elif intentName == 'conflict' and len(args) > 0:\n self.conflict = args[0]\n self.conflictLock.release()\n elif intentName == 'inhumanity' and len(args) > 0:\n self.inhumanity = args[0]\n self.inhumanityLock.release()\n elif intentName == 'family' and len(args) > 0:\n self.family = args[0]\n self.familyLock.release()\n elif intentName == 'reason' and len(args) > 0:\n self.reason = args[0]\n self.reasonLock.release()\n elif intentName == 'route' and len(args) > 0:\n self.route = args[0]\n self.routeLock.release()\n elif intentName == 'entrance' and len(args) > 0:\n self.entrance = args[0]\n self.entranceLock.release()\n elif intentName == 'yesno' and len(args) > 0:\n self.documentation = args[0]\n self.documentationLock.release()\n elif intentName == 'company' and len(args) > 0:\n self.company = args[0]\n self.companyLock.release()", "def fallback_handler(handler_input):\n # type: (HandlerInput) -> Response\n session_attr = handler_input.attributes_manager.session_attributes\n\n speech_text = (\n \"The {} skill can't help you with that.\".format(SKILL_NAME))\n\n handler_input.response_builder.speak(\n speech_text).set_should_end_session(False)\n return handler_input.response_builder.response", "def take_action(self, *args, **kwargs):\r\n pass", "def on_activate(self):", "def main():\n user_interaction()", "def on_start(self, ctx):\n pass", "def user_call(self, frame, argument_list):\r\n if self._wait_for_mainpyfile:\r\n return\r\n if self.stop_here(frame):\r\n self.interaction(frame)", "def on_tool(self):\n if self.tmFile is not None:\n self.log.info(\"Launch tool %s\" % self.pItem.itemName)\n toolMngrCmds.launchTools(self.pItem.itemName, self.tmFile, self.log.level)", "async def post_launch(self, **kwargs: Any) -> None:\n pass", "def on_launch(event, launch_request, session):\n\n logger.info(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n return get_generic_welcome_message()", "def on_launch(request):\n\n return get_launch_error_response()", "def execute_for_command(self, skill_input: SkillInput, services: AssistantServicesBase):\n voice = skill_input.adjective.lower()\n if voice in (\"female\", \"male\"):\n services.settings_service.voice = voice\n services.settings_service.save_settings()\n services.user_interaction_service.speak('Okay, I will use a %s voice from now on.' 
% (voice), True)\n else:\n services.user_interaction_service.speak('I don\\'t understand what voice you want')", "def test_with_no_role(self, do_student_launch, student_payload):\n student_payload[\"https://purl.imsglobal.org/spec/lti/claim/roles\"] = [\"\"]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)", "def __init__(self):\n super(LaunchRequest, self).__init__()", "def hook_greet(self):\n ui.greet()", "def skill_menu():\n while True:\n menu_options = load_menu_options()\n menu_complete = menu_options[:]\n menu_complete.append('Add new skill')\n questions = [\n inquirer.List('skill',\n message=\"What skill are you improving?\",\n choices=menu_complete,\n carousel=True\n ),\n ]\n selections = inquirer.prompt(questions)\n if selections['skill'] == menu_complete[-1]:\n entering_active = True\n while entering_active:\n skill_name = input(\"\\nGive this new skill a name: \").title()\n if len(skill_name) > 3:\n initiate_new_sheet(skill_name)\n entering_active = False\n else:\n print('ERROR: The skill name must be longer than three characters.')\n elif selections['skill'] in menu_options:\n # Get the sheet object of Python\n sheet_name = selections['skill']\n break\n return sheet_name", "def on_intent(event):\n\n intent = event[\"request\"][\"intent\"][\"name\"]\n\n if intent in (\"AMAZON.CancelIntent\", \"AMAZON.StopIntent\", \"AMAZON.NoIntent\"):\n return handle_session_end_request()\n\n if intent == \"AMAZON.YesIntent\":\n if \"attributes\" in event[\"session\"] and \"previousIntent\" in \\\n event[\"session\"][\"attributes\"]:\n\n if event[\"session\"][\"attributes\"][\"previousIntent\"] == \"AMAZON.HelpIntent\":\n return main_handler(event)\n\n speech_output = event[\"session\"][\"attributes\"][\"nextStations\"]\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n speech_output = \"Sorry, something went wrong.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n if intent == \"isBikesAvailable\":\n return main_handler(event)\n\n if intent == \"AMAZON.HelpIntent\":\n return handle_help_intent()\n\n speech_output = \"Sorry, I don\\'t know that.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)", "def will_activate(self):\n pass", "def _default(self):\n self.app.args.print_help()", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"<YOUR INTENT NAME HERE>\":\n # Update the wordsmith_data variable with your data. 
Use key, value\n # pairs where the key is the column name in Wordsmith and the value is\n # the value contained in that column\n wordsmith_data = { 'column1': 'value1', 'column2': 'value2' }\n narrative = wordsmith.generate(WORDSMITH_API_KEY, WORDSMITH_PROJECT_SLUG, WORDSMITH_TEMPLATE_SLUG, wordsmith_data)\n if 'errors' not in narrative:\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generated Response', narrative['data']['content'],\n '<REPROMPT TEXT HERE>', True))\n else:\n if not isinstance(narrative['errors'], list) :\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generation Error', 'Wordsmith reported the following error: {}'.format(narrative['errors']['detail']),\n '<REPROMPT TEXT HERE>', True))\n else:\n details = ', '.join([e['details'] for e in narrative['errors']])\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generation Error', 'Wordsmith reported the following error: {}'.format(details),\n '<REPROMPT TEXT HERE>', True))\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def _skills_manager_dispatch():\n global ws\n ws.emit(Message(\"skill_manager\", {}))", "def handle_awoken():\n LOG.info(\"Listener is now Awake: \")\n context = {'client_name': 'mycroft_listener',\n 'source': 'audio',\n 'destination': [\"skills\"]}\n bus.emit(Message('mycroft.awoken', context=context))", "def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"dummy\\n\"", "def launch(self):", "def on_actionDemo_triggered(self):\n self.start_app(DemoApp)", "def use_skill(self, g, i, x, y):\n # @ param g a reference to the game engine\n # @ param i the index of the skill (basically what skill)\n # @ param x the x target coordinate in game pixels\n # @ param y the y target coordinate in game pixels\n if self.attackTimer < self.attackDelay:\n print(\"attack on CD\")\n return\n \n if self.skill[i].skillAttr == 0:\n g.fire_skill_sound.play()\n elif self.skill[i].skillAttr == 1:\n g.ice_skill_sound.play()\n elif self.skill[i].skillAttr == 2:\n g.lightning_skill_sound.play()\n elif self.skill[i].skillAttr == 3:\n g.poison_skill_sound.play()\n \n \n if self.skill[i].skillKey == 0: #Aura\n #turn the aura on/off\n if self.skill[i].active == False:\n #print(\"aura on\")\n self.skill[i].active = True\n else:\n self.skill[i].active = False\n #print(\"aura off\")\n \n elif self.skill[i].skillKey == 1: #Missile\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n target = Target(x, y)\n center_x = self.rect.x + (self.rect.width / 2)\n center_y = self.rect.y + (self.rect.height / 2)\n #bullet types: fire 5, ice 6, lightning 7\n #skill types: fire 0, ice 1, lightning 2\n g.bullets.append(self.bulletFactory.createBullet(g, self.skill[i].skillAttr + 5, 0, self.attack, 1024, target, center_x, center_y))\n #print(\"missile\")\n\n elif self.skill[i].skillKey == 2: #Breath\n #for each creep in the AoE cone, do damage.\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n #get low and high angle (-45 degrees and +45 degrees from 
player -> point angle)\n lowAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) - 3.1415 / 2.0\n highAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) + 3.1415 / 2.0\n for creep in g.creeps:\n #get angle to creep\n creepAngle = math.atan2(creep.rect.centery - self.rect.centery, creep.rect.centerx - self.rect.centerx)\n \n #if angle to the creep is between the two angles\n if creepAngle > lowAngle and creepAngle < highAngle:\n #and the distance to the creep is below the skill's range\n if ( (creep.rect.centerx - self.rect.centerx) ** 2 + (creep.rect.centery - self.rect.centery) ** 2 ) ** 0.5 < 4 * 24:\n creep.take_damage( self.attack )\n #print(\"breath\")\n #apply debuffs, based on type\n if self.skill[i].skillAttr == 0: #fire\n creep.applyBurning()\n elif self.skill[i].skillAttr == 1: #frost\n creep.applyChilled()\n elif self.skill[i].skillAttr == 2: #lightning\n creep.applyShocked()", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if (event['session']['application']['applicationId'] !=\n \"amzn1.ask.skill.xxxx\"):\n #Set Alexa Skill ID\n raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def chooseAction(self):\n print \"nothing\"\n pass", "def addSkill(self, newskill):\n self.skills.append( newskill )", "def applicationDidFinishLaunching_(self, aNotification):", "async def optin(self, ctx):\n optout.delete_one({\"_id\": ctx.author.id})\n await ctx.send(f\"You have **opted into** A Sound Mood. To leave the program, use ?optout.\")", "def greetWaveHand(self):\n\n self.behaviorService.startBehavior(\"caresses/greetingsuk_silent\")", "def fallback_handler(handler_input):\n speech_text = \"See you later! 
Enjoy the hackathon.\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello World\", speech_text)).set_should_end_session(\n True)\n return handler_input.response_builder.response", "def _starting_up():\n global ws, skill_reload_thread, event_scheduler\n\n ws.on('intent_failure', FallbackSkill.make_intent_failure_handler(ws))\n\n # Create skill_manager listener and invoke the first time\n ws.on('skill_manager', skills_manager)\n ws.on('mycroft.internet.connected', install_default_skills)\n ws.emit(Message('skill_manager', {}))\n\n # Create the Intent manager, which converts utterances to intents\n # This is the heart of the voice invoked skill system\n\n PadatiousService(ws)\n IntentService(ws)\n event_scheduler = EventScheduler(ws)\n # Create a thread that monitors the loaded skills, looking for updates\n skill_reload_thread = WatchSkills()\n skill_reload_thread.daemon = True\n skill_reload_thread.start()\n\n # Wait until skills have been loaded once before starting to check\n # network connection\n skill_reload_thread.wait_loaded_priority()\n check_connection()", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass" ]
[ "0.6812588", "0.6779727", "0.6764538", "0.6764138", "0.65563494", "0.65384686", "0.6528976", "0.64852035", "0.64852035", "0.6459305", "0.64153165", "0.64153165", "0.64153165", "0.64153165", "0.64153165", "0.64153165", "0.64153165", "0.64153165", "0.64153165", "0.64153165", "0.64153165", "0.64153165", "0.64153165", "0.64153165", "0.64153165", "0.63939637", "0.63939637", "0.63939637", "0.63824195", "0.63623667", "0.6352821", "0.5870996", "0.5784539", "0.57396275", "0.57361734", "0.5714778", "0.566534", "0.5588397", "0.5533701", "0.5514367", "0.5474947", "0.54478484", "0.54207253", "0.5401689", "0.5380686", "0.5370558", "0.53297687", "0.5292161", "0.5290882", "0.52854484", "0.52725464", "0.52530813", "0.5250585", "0.52503467", "0.5249205", "0.5234932", "0.5231934", "0.5213159", "0.52119863", "0.5203359", "0.5200615", "0.51947266", "0.5177716", "0.51644903", "0.5163111", "0.5160196", "0.514295", "0.5131847", "0.51149184", "0.5097959", "0.5095056", "0.50905204", "0.50776035", "0.5063762", "0.5060868", "0.50598", "0.504361", "0.5043592", "0.50385344", "0.5035966", "0.5030425", "0.5018153", "0.50164944", "0.5016255", "0.50151795", "0.50140524", "0.5013956", "0.5011133", "0.5007253", "0.49763688", "0.49751902", "0.49734885", "0.49723303", "0.4970102", "0.4970102", "0.4970102", "0.4970102", "0.4970102", "0.4970102", "0.4970102" ]
0.6200907
31
If the event contains a "request" whose type is "IntentRequest", this function is called to execute the logic associated with the provided intent and build a response.
def on_intent(self): mcd = self._mcd print( self.LOG_CLASS, '[method: on_intent]', '[intent: ' + mcd.intent_name + ']', 'MyCityDataModel received:', mcd ) # Check if the user is setting the address. This is special cased # since they may have been prompted for this info from another intent if mcd.intent_name == "SetAddressIntent": set_address_in_session(mcd) if intent_constants.ADDRESS_PROMPTED_FROM_INTENT \ in mcd.session_attributes: # User was prompted for address from another intent. # Set our current intent to be that original intent now that # we have set the address. mcd.intent_name = mcd.session_attributes[intent_constants.ADDRESS_PROMPTED_FROM_INTENT] print("Address set after calling another intent. Redirecting " "intent to {}".format(mcd.intent_name)) # Delete the session key indicating this intent was called # from another intent. del mcd.session_attributes[intent_constants.ADDRESS_PROMPTED_FROM_INTENT] else: return get_address_from_session(mcd) # session_attributes = session.get("attributes", {}) if mcd.intent_name == "GetAddressIntent": return get_address_from_session(mcd) elif mcd.intent_name == "TrashDayIntent": return request_user_address_response(mcd) \ if intent_constants.CURRENT_ADDRESS_KEY \ not in mcd.session_attributes \ else get_trash_day_info(mcd) elif mcd.intent_name == "SnowParkingIntent": return request_user_address_response(mcd) \ if intent_constants.CURRENT_ADDRESS_KEY \ not in mcd.session_attributes \ else get_snow_emergency_parking_intent(mcd) elif mcd.intent_name == "GetAlertsIntent": return get_alerts_intent(mcd) elif mcd.intent_name == "AMAZON.HelpIntent": return self.get_welcome_response() elif mcd.intent_name == "AMAZON.StopIntent" or \ mcd.intent_name == "AMAZON.CancelIntent": return self.handle_session_end_request() elif mcd.intent_name == "UnhandledIntent": return unhandled_intent(mcd) else: raise ValueError("Invalid intent")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_intent(intent_request, session):\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n\r\n # Dispatch to your skill's intent handlers\r\n if intent_name == \"test\":\r\n return get_test_response()\r\n elif intent_name==\"inputoutputIntent\":\r\n return get_inputOutputIntent_response(intent)\r\n elif intent_name==\"lightsIntent\":\r\n return get_lightsIntent_response(intent)\r\n elif intent_name==\"shadesIntent\":\r\n return get_shadesIntent_response(intent)\r\n elif intent_name==\"volumeIntent\":\r\n return get_volumeIntent_response(intent)\r\n elif intent_name==\"InputPresetIntent\":\r\n return get_InputPresetIntent_response(intent)\r\n elif intent_name==\"monitorsIntent\":\r\n return get_monitorsIntent_response(intent)\r\n elif intent_name==\"bossIntent\":\r\n return get_bossIntent_response()\r\n elif intent_name==\"AudioCall\":\r\n return get_AudioCall_response(intent)\r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")", "def on_intent(request, session):\n\n intent_name = request['intent']['name']\n \n # process the intents\n if intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n \n elif intent_name == \"AMAZON.StopIntent\":\n return get_stop_response()\n \n elif intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response()\n \n elif intent_name == \"AMAZON.FallbackIntent\":\n return get_fallback_response()\n \n elif intent_name == \"recognizeDates\":\n slots = request['intent']['slots']\n date_start_slot = slots.get('dateStart',{'value':'NA'}).get('value','NA')\n date_end_slot = slots.get('dateEnd',{'value':'NA'}).get('value','NA')\n\n return get_intent_response(date_start_slot,date_end_slot)\n \n elif intent_name == \"PollHprofs\":\n slots = request['intent'].get('slots','')\n print(slots)\n speechOutput = \"Under development\"\n return response(speech_response(speechOutput, True))\n\n elif intent_name == \"SpinVMs\":\n slots = request['intent'].get('slots','')\n print(slots)\n speechOutput = \"Under development\"\n return response(speech_response(speechOutput, True))\n\n else:\n print(\"For invalid Intents reply with help\")\n return get_help_response()", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n if intent_name == \"GUTSIntent\":\n session_attributes = {}\n return build_response(session_attributes, build_speechlet_response(\n \"GUTSCard\", \"I have the GUTS\", \"I love hackathons\", True))\n\n if intent_name == \"LoveAIntent\":\n #session_attributes = {}\n #if loveStage = 1:\n # return build_response(session_attributes, build_speechlet_response(\n # \"Love1Card\", \"I love Theo!\", \"I love Theo so much!\", False))\n return handle_love_A_intent(session)\n\n if intent_name == \"LoveBIntent\":\n return handle_love_B_intent(session)\n\n if intent_name == \"LoveCIntent\":\n return handle_love_C_intent(session)", "def on_intent(intent_request, session):\n\n\tprint(\"on_intent requestId=\" + intent_request['requestId'] +\n\t\t \", sessionId=\" + session['sessionId'])\n\n\tintent = intent_request['intent']\n\tintent_name = intent_request['intent']['name']\n\n\t# Sends the request to one of our intents\n\tif 
intent_name == \"sendVideoIntent\":\n\t\treturn sendVideo(intent, session)\n\telif intent_name == \"setVolumeIntent\":\n\t\treturn setVolume(intent, session)\n\telif intent_name == \"AMAZON.PauseIntent\":\n\t\treturn pauseVideo(intent, session)\n\telif intent_name == \"AMAZON.ResumeIntent\":\n\t\treturn resumeVideo(intent, session)\n\telif intent_name == \"AMAZON.HelpIntent\":\n\t\treturn get_welcome_response()\n\telif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n\t\treturn handle_session_end_request()\n\telse:\n\t\traise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId: %s, Session: %s\" % (intent_request['requestId'], session))\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"SendHevantePoints\":\n return send_hevante_points(intent, session)\n elif intent_name == \"GetPoints\":\n return get_points(intent, session)\n elif intent_name == \"GetName\":\n return get_name(intent, session)\n elif intent_name == \"GetReason\":\n return get_reason(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent: %s\" % intent_name)", "def on_intent(intent_request, session):\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"CountryStatusIntent\":\n return get_country_info(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_start_end_response(False)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return get_start_end_response(True)\n else:\n return get_start_end_response(False)", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n # intents_object = get_custom_intents()\n print (\"************\")\n print (intent_request)\n # fall_back = True\n # final_function = ''\n # for temp_intent in intents_object:\n # if temp_intent == intent_name:\n # fall_back = False\n # final_function = temp_intent[1]\n # break\n # if(fall_back):\n # return custom_handlers.get_fallback_msg()\n # else:\n # return final_function(intent, session)\n \n # Dispatch to your skill's intent handlers\n if intent_name == \"welcome_intent\":\n return custom_handlers.get_welcome_msg(intent, session)\n elif intent_name == \"search_intent\":\n return custom_handlers.get_search_msg(intent, session)\n elif intent_name == \"architecture\":\n return custom_handlers.get_architecture_msg(intent, session)\n elif intent_name == \"saybye\":\n return custom_handlers.get_saybye_response(intent, session)\n elif intent_name == \"myname\":\n return custom_handlers.get_myname_response(intent, session)\n elif intent_name == \"ask\":\n return custom_handlers.get_ask_response(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return custom_handlers.get_welcome_response(intent, session)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return custom_handlers.handle_session_end_request(intent, session)\n else:\n return custom_handlers.get_fallback_msg(intent, session)", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = 
intent_request['intent']['name']\n # Dispatch to your skill's intent handlers\n if intent_name == \"test\":\n return get_test_response()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"forecast\":\n return get_forecast_response()\n elif intent_name == \"detailedforecast\":\n return get_detailed_forecast_response()\n elif intent_name == \"uscanadaforecast\":\n return get_uscanada_forecast_response()\n elif intent_name == \"detaileduscanadaforecast\":\n return get_detailed_uscanada_forecast_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(event_request, session):\n print(\"=====on_intent requestId: \" + event_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = event_request['intent']\n intent_name = event_request['intent']['name']\n print(\"=====intent is: \" + intent_name)\n\n if intent_name == \"AnswerIntent\":\n print(\"=====AnswerIntent fired...\")\n if 'attributes' in session:\n if 'questions' in session['attributes']:\n return handle_answer_request(intent, session)\n\n # we probably got here because user said something other than\n # yes or no after asking if they wanted to play the game again\n print(\"=====no attributes ending game\")\n return play_end_message()\n if intent_name == \"GameIntent\":\n print(\"=====GameIntent fired...\")\n # if there's a session and we're in a game treat this as an answer\n # unfortunately it will be wrong but it's better than starting over\n if 'attributes' in session:\n if session['attributes']['game_status'] == \"in_progress\":\n return handle_answer_request(intent, session)\n return play_new_game(False)\n if intent_name in (\"AMAZON.StartOverIntent\", \"AMAZON.YesIntent\"):\n print(\"=====StartOverIntent or YesIntent fired...\")\n return play_new_game(True)\n if intent_name == \"AMAZON.NoIntent\":\n print(\"=====NoIntent fired...\")\n # if there's a session and we're in a game treat this as a wrong answer\n if 'attributes' in session:\n if session['attributes']['game_status'] == \"in_progress\":\n return handle_answer_request(intent, session)\n # otherwise end the game\n return play_end_message()\n if intent_name in (\"AMAZON.StopIntent\", \"AMAZON.CancelIntent\"):\n print(\"=====StopIntent or CancelIntent fired\")\n return play_end_message()\n if intent_name == 'AMAZON.HelpIntent':\n print(\"=====HelpIntent...\")\n tts = \"During the game I'll give you 6 random brain teasers and only 8 \"\\\n \"seconds to anser each one... To make your mind muscles stronger, I \"\\\n \"won't repeat any of the questions, so try to remember all the \"\\\n \"details... 
You can say 'Start Over' if you'd like a new game, \"\\\n \"or make your guess for the last question...\"\n return speech(tts, session['attributes'], False, None)", "def on_intent(event, intent_request, session):\n\n logger.info(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent == \"NextBins\":\n return get_next_collection(event, session)\n\n return get_generic_welcome_message()", "def on_intent(intent_request, session):\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n \n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n # Dispatch to your skill's intent handlers\n if intent_name == \"GetLatestAgendaIntent\":\n return get_next_agenda_response(session)\n elif intent_name == \"GetLatestMotionsIntent\":\n return get_next_motions_response(session)\n elif intent_name == \"GetNextMotionIntent\":\n return get_next_motions_response(session)\n elif intent_name == \"SetPhoneNumberIntent\":\n return text_url_to_number(session, intent);\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n\r\n # Dispatch to your skill's intent handlers\r\n if intent_name == \"MakeCoffee\":\r\n return make_coffee(intent, session)\r\n elif intent_name == \"TurnCoffeeMachine\":\r\n return turn_coffee_machine(intent, session)\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return turn_off_coffee_machine()\r\n else:\r\n return invalid_intent()\r\n #raise ValueError(\"Invalid intent\")\r", "def on_intent(request, session):\n\n intent_name = request['intent']['name']\n\n # process the intents\n if intent_name == \"comenzar\":\n return get_fact_response()\n elif intent_name == \"otravez\":\n return get_fact_response()\n elif intent_name == \"AMAZON.YesIntent\":\n return get_fact_response()\n elif intent_name == \"AMAZON.NoIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n elif intent_name == \"AMAZON.StopIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.FallbackIntent\":\n return get_fallback_response()\n else:\n print(\"invalid Intent reply with help\")\n return get_help_response()", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"RandNumIntent\":\n return generate_random_num(intent, session)\n elif intent_name == \"RangeRandNumIntent\":\n return generate_random_num(intent, session)\n elif intent_name == \"DiceIntent\":\n return generate_random_num(intent, session, num1=1, num2=6)\n elif intent_name == \"HundredDiceIntent\":\n return 
generate_random_num(intent, session, num1=1, num2=100)\n elif intent_name == \"RouletteIntent\":\n return generate_random_num(intent, session, num1=1, num2=10)\n elif intent_name == \"SelectIntent\":\n return generate_random_num(intent, session, num1=1)\n elif intent_name == \"RepeatIntent\":\n if 'attributes' not in session:\n return handle_error_status()\n else:\n attributes = session.get('attributes')\n return generate_random_num(intent, session, **attributes)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n\n if intent_name not in skillmap:\n intent_name = \"NullSkill\"\n\n if intent_name in skillmap:\n try:\n return skillmap[intent_name].execute(intent, session)\n except Exception as e:\n traceback.print_exc()\n return SkillBase().respond(\"Sorry I missed that\", \"Error\", str(e))\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n print(intent)\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"WhoIs\":\n intent_search = intent['slots']['ASN']['value']\n return whois(intent_search)\n elif intent_name == \"WherePeer\":\n intent_search = intent['slots']['company']['value']\n return wherePeer(intent_search)\n elif intent_name == \"WhoPeers\":\n intent_search = intent['slots']['IX']['value']\n return whoPeers(intent_search)\n elif intent_name == \"WhosAt\":\n intent_search = intent['slots']['facility']['value']\n return whosAt(intent_search)\n elif intent_name == \"RouteServers\":\n return routeServers()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"WhensNextTrainIntent\":\n return get_next_train(intent, session)\n elif intent_name == \"SetFavoriteStationIntent\":\n return set_favorite_station(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response(session)\n elif intent_name == \"AMAZON.StopIntent\" or intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response(session)\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(request, session):\n\n intent = request['intent']\n\n print(\"on_intent:\", intent)\n\n if intent[\"name\"] == \"AntwortIntent\":\n return handle_answer_request(intent, session)\n elif intent[\"name\"] == \"DontKnowIntent\":\n return handle_answer_request(intent, session)\n elif intent['name'] == \"AMAZON.RepeatIntent\":\n return handle_repeat_request(intent, session)\n elif intent['name'] == \"AMAZON.StopIntent\" or intent['name'] == \"AMAZON.CancelIntent\":\n return handle_finish_session_request(intent, session)\n elif intent['name'] == \"AMAZON.HelpIntent\":\n return get_help(intent, session)\n elif intent['name'] == \"StartQuizIntent\" or intent['name'] == \"AMAZON.StartoverIntent\":\n if session[\"new\"] == False:\n return get_welcome_message(restart=True)\n #if no intent is identified:\n return 
get_help(intent, session)", "def on_intent(event):\n\n intent = event[\"request\"][\"intent\"][\"name\"]\n\n if intent in (\"AMAZON.CancelIntent\", \"AMAZON.StopIntent\", \"AMAZON.NoIntent\"):\n return handle_session_end_request()\n\n if intent == \"AMAZON.YesIntent\":\n if \"attributes\" in event[\"session\"] and \"previousIntent\" in \\\n event[\"session\"][\"attributes\"]:\n\n if event[\"session\"][\"attributes\"][\"previousIntent\"] == \"AMAZON.HelpIntent\":\n return main_handler(event)\n\n speech_output = event[\"session\"][\"attributes\"][\"nextStations\"]\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n speech_output = \"Sorry, something went wrong.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n if intent == \"isBikesAvailable\":\n return main_handler(event)\n\n if intent == \"AMAZON.HelpIntent\":\n return handle_help_intent()\n\n speech_output = \"Sorry, I don\\'t know that.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)", "def on_intent(intent_request, session):\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n\r\n # Dispatch to your skill's intent handlers\r\n if intent_name == \"ConfirmYesOrNo\":\r\n return get_confirmation(intent, session)\r\n elif intent_name == \"CheckAnswer\":\r\n return check_answer(intent, session)\r\n elif intent_name == \"GetNextQuestion\":\r\n return next_question(intent, session)\r\n elif intent_name == \"RepeatQuestion\":\r\n return repeat_question(intent, session)\r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent_name = \"\"\n if 'intent' in intent_request:\n intent = intent_request['intent']\n if 'name' in intent:\n intent_name = intent['name']\n\n # Dispatch to your skill's intent handlers\n if not intent_name:\n return get_help_response()\n elif intent_name == \"Hello\":\n return say_hello()\n elif intent_name == \"Brandon\":\n return say_brandon()\n elif intent_name == \"Warning\":\n return say_warning()\n elif intent_name == \"Dance\":\n return say_dance_lights()\n elif intent_name == \"Spot\":\n return say_spot_light()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n return say_hello()\n return get_help_response()", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"FindObjectIntent\":\n return set_find_object(intent, session)\n elif intent_name == \"GetTemperature\":\n return get_temp(intent, session)\n elif intent_name == \"GetHumidity\":\n return get_humidity(intent, session)\n elif intent_name == \"SetPanIntent\":\n return set_pan_angle(intent, 
session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n elif intent_name == \"Ja_Bitte\":\n return Ja_Bitte_session(intent, session)\n else:\n raise ValueError(\"Invalid intent\")", "def process_request(self, event, context):\n # if its a new session, run the new session code\n try:\n response = None\n if event['session']['new']:\n self.on_session_started({'requestId': event['request']['requestId']}, event['session'])\n\n # regardless of whether its new, handle the request type\n if event['request']['type'] == \"LaunchRequest\":\n response = self.on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n response = self.on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n response = self.on_session_ended(event['request'], event['session'])\n\n except Exception as exc:\n response = self.on_processing_error(event, context, exc)\n\n return response", "def on_intent(intent_request, session):\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"GetLottozahlen\":\n return get_Lottozahlen(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_help_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session, league):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"TeamIntent\":\n return get_team_info(intent, session, league)\n elif intent_name == \"LeaderIntent\":\n return get_leader(intent, session, league)\n elif intent_name == \"GSevenIntent\":\n return get_gseven(intent, session, league)\n elif intent_name == \"RelegationIntent\":\n return get_zfour(intent, session, league)\n elif intent_name == \"TableIntent\":\n return get_table(intent, session, league)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n print(\"---INTENT: \" + intent_name)\n\n # Dispatch to your skill's 
intent handlers\n try:\n if intent_name == \"GetSynonymIntent\":\n return get_synonym(intent, session)\n elif intent_name == \"GetRandomSynonymIntent\":\n return get_random_synonym(intent, session)\n elif intent_name == \"GetAllSynonymsIntent\":\n return get_all_synonyms(intent, session)\n elif intent_name == \"GetAntonymIntent\":\n return get_antonym(intent, session)\n elif intent_name == \"GetRandomAntonymIntent\":\n return get_random_antonym(intent, session)\n elif intent_name == \"GetAllAntonymsIntent\":\n return get_all_antonyms(intent, session)\n elif intent_name == \"GetPOSIntent\":\n return get_pos(intent, session)\n elif intent_name == \"GetRhymeIntent\":\n return get_rhyme(intent, session)\n elif intent_name == \"GetRandomRhymeIntent\":\n return get_random_rhyme(intent, session)\n elif intent_name == \"GetDefinitionIntent\":\n return get_definition(intent, session)\n elif intent_name == \"GetRandomDefinitionIntent\":\n return get_random_definition(intent, session)\n elif intent_name == \"GetAllDefinitionsIntent\":\n return get_all_definitions(intent, session)\n elif intent_name == \"GetSyllablesIntent\":\n return get_syllables(intent, session)\n elif intent_name == \"GetFrequencyIntent\":\n return get_frequency(intent, session)\n elif intent_name == \"GetPronunciationIntent\":\n return get_pronunciation(intent, session)\n elif intent_name == \"GetAllCommandsIntent\":\n return get_all_commands()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n response = build_speechlet_response(\"Error\", \"Sorry, I don't know that command. I can find definitions, synonyms, antonyms, and more if you say something like 'a synonym for happy'.\", None, True)\n return build_response({}, response)\n\n except:\n response = build_speechlet_response(\"Error\", \"Sorry, I don't know that word!\", None, True)\n return build_response({}, response)", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"YesIntent\":\n return set_phrase_in_session(intent, session)\n elif intent_name == \"AnswerIntent\":\n return check_answer(intent, session)\n elif intent_name==\"AMAZON.HelpIntent\":\n return get_help(intent,session)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n return handle_error(intent, session)", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"HelloWorldIntent\":\n return handle_session_end_request()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if 
intent_name == \"SetDeviceWaypoint\":\n return set_device_waypoint(intent, session)\n elif intent_name == \"TravelTo\":\n return travel_to(intent, session)\n elif intent_name == \"WhereAmI\":\n return where_am_i(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] + \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"StartIntent\": \n '''if \"attributes\" in session.keys():\n return answer_question(intent,session)\n '''\n return start_feedback(intent, session)\n \n elif intent_name == \"AnswerIntent\":\n return answer_question(intent, session)\n \n elif intent_name == \"AMAZON.ResumeIntent\":\n return resume_feedback(intent, session)\n \n elif intent_name == \"AMAZON.PauseIntent\":\n return pause_feedback(intent, session)\n \n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n \n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request(session)\n \n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n #temp = intent_request['intent']['name']['slots']['name']\r\n # Dispatch to your skill's intent handlers\r\n if intent_name == \"MoveRight\":\r\n return set_session(intent, session)\r\n elif intent_name == \"MoveLeft\":\r\n return set_session(intent, session)\r\n elif intent_name == \"MoveForward\":\r\n return set_session(intent, session)\r\n elif intent_name == \"MoveBackward\":\r\n return set_session(intent, session)\r\n elif intent_name == \"Help\":\r\n return AskNow(intent, session)\r\n elif intent_name == \"Hint\":\r\n return bfs(intent, session)\r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")", "def dispatch(intent_request):\r\n\r\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\r\n\r\n intent_name = intent_request['currentIntent']['name']\r\n\r\n # Dispatch to your bot's intent handlers\r\n if intent_name == 'gethousepredict':\r\n return housepredict(intent_request)\r\n elif intent_name == 'availablehouses':\r\n housetype = intent_request['currentIntent']['slots']['housetypesavail']\r\n location = intent_request['currentIntent']['slots']['locationavail']\r\n item_dtl = house_price_dtl(location,housetype)\r\n #print (\"housetype\",housetype)\r\n #print (\"location\",location)\r\n #print (\"House Pirce\",price)\r\n response = {\r\n \"dialogAction\": {\r\n \"type\": \"Close\",\r\n \"fulfillmentState\": \"Fulfilled\",\r\n \"message\": {\r\n \"contentType\": \"SSML\",\r\n \"content\": \" Hosue Details \\n {item_dtls}\".format(item_dtls = item_dtl)\r\n },\r\n }\r\n }\r\n print('result = ' + str(response))\r\n return 
response\r\n\r\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"NumberFact\":\n return num_fact(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"NextLaunchIntent\":\n return perform_next_launch_intent(intent, session)\n elif intent_name == \"MissionDetailIntent\":\n return get_color_from_session(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"NewMessageIntent\":\n return record_new_message(intent, session)\n elif intent_name == \"SaveIntent\":\n return process_save_intent(intent, session)\n elif intent_name == \"ReadMessageIntent\":\n return read_message(intent, session)\n elif intent_name == \"AddUserIntent\":\n return add_user(intent, session)\n elif intent_name == \"RemoveMessageIntent\":\n return remove_messages(intent, session)\n \n \n elif intent_name == \"AMAZON.HelpIntent\":\n return process_help_intent(intent, session)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'GreetingIntent':\n return GreetingIntent(intent_request)\n elif intent_name == 'DiningSuggestionsIntent':\n return DiningSuggestionsIntent(intent_request)\n elif intent_name == 'ThankYouIntent' :\n return ThankYouIntent(intent_request)\n \n\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if('access_token' not in session):\n oauth_request(session)\n print(session['access_token'])\n # Dispatch to your skill's intent handlers\n if intent_name == \"Tracking\":\n return setFirstEleven(intent, session)\n elif intent_name == \"TrackingSecond\":\n return getParcelStatus(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or 
intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n print(str(session))\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n\r\n # Dispatch to your skill's intent handlers\r\n if intent_name == \"AMAZON.YesIntent\":\r\n if \"attributes\" not in session:\r\n return ask_first_question(intent, session)\r\n elif \"guessed\" in session[\"attributes\"]:\r\n return get_welcome_response()\r\n else:\r\n return handle_yes_response(intent, session)\r\n elif intent_name == \"AMAZON.NoIntent\":\r\n if \"attributes\" not in session:\r\n return get_welcome_response()\r\n elif \"guessed\" in session[\"attributes\"]:\r\n return handle_session_end_request()\r\n else:\r\n return handle_no_response(intent, session)\r\n elif intent_name == \"AMAZON.StartOverIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")", "def dispatch(intent_request):\n\n #logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n print(\"asdd\")\n print(intent_name)\n \n # Dispatch to your bot's intent handlers\n if intent_name == 'GreetingIntent':\n return greeting_intent(intent_request)\n elif intent_name == 'DiningSuggestionsIntent':\n return dining_suggestion_intent(intent_request)\n elif intent_name == 'ThankYouIntent':\n return thank_you_intent(intent_request)\n\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'ScheduleMeeting':\n return schedule_meeting(intent_request)\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"AddToCart\":\n return quary(intent, session)\n else:\n print(\"invalid intent\")\n raise ValueError(\"Invalid intent\")", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'VCH_Policies':\n return respond(intent_request, 'Policies')\n\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'bitbotSetNewAlert':\n return 
set_currency_alert(intent_request)\n # elif intent_name == 'Temp':\n # return set_currency_alert(intent_request)\n\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n \n if intent_name == 'BillInquiry':\n return billinquiry(intent_request)\n elif intent_name == 'billpayment': \n return billpayment(intent_request)\n elif intent_name == 'RebootSystem': \n return rebootsystem(intent_request)\n else:\n return default_answer(intent_request)\n\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def handle_answer_request(intent, session):\n\n eins_list = [\"eins\", \"ein\", \"einer\", \"eine\", \"einen\", \"eines\", \"einem\"]\n \n if intent[\"name\"] == \"DontKnowIntent\":\n answer = \"weiß nicht\"\n elif \"Nummer\" in intent[\"slots\"].keys() and \"value\" in intent[\"slots\"][\"Nummer\"]:\n answer = intent[\"slots\"][\"Nummer\"][\"value\"]\n elif \"Antworten\" in intent[\"slots\"].keys() and \"value\" in intent[\"slots\"][\"Antworten\"]:\n answer = intent[\"slots\"][\"Antworten\"][\"value\"]\n else:\n answer = \"Fehler\"\n \n #Necessary to recognize \"1\":\n if answer in eins_list:\n answer = \"1\"\n elif answer == \"ein mal\":\n answer = \"einmal\"\n answer = answer.lower()\n\n print(\"handle_answer_request: \", intent, \"answer: \", answer)\n\n if \"attributes\" not in session:\n return start_game(answer, session)\n elif session[\"attributes\"][\"state\"] == \"Gameon\":\n return check_answer(answer, session)\n elif session[\"attributes\"][\"state\"] == \"Start\":\n return start_game(answer, session)\n\n return start_game(answer, session)", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'Help':\n return order_help(intent_request)\n elif intent_name == 'FastFood':\n return order_snacks(intent_request)\n elif intent_name == 'Beverages':\n return order_beverages(intent_request)\n elif intent_name == 'Admin':\n return admin(intent_request)\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def on_intent(intent_request, session):\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n \r\n if intent_name == \"unsafe\":\r\n send_message_alerts()\r\n session_attributes = {}\r\n card_title = \"Welcome, this is Emma\"\r\n speech_output = \"Calling police, Connected with police , Police on the way. Police will be in 1 min . Your relatives and frieds are all informed. 
Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me\"\r\n \r\n # If the user either does not reply to the welcome message or says something\r\n # that is not understood, they will be prompted again with this text.\r\n reprompt_text = \"Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me \"\r\n \r\n should_end_session = False\r\n return build_response(session_attributes, build_speechlet_response(\r\n card_title, speech_output, reprompt_text, should_end_session))\r\n \r\n \r\n \r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")", "def dispatch(intent_request):\n\n intent_name = intent_request[\"currentIntent\"][\"name\"]\n\n # Dispatch to bot's intent handlers\n if intent_name == \"recommendPortfolio\":\n return recommend_portfolio(intent_request)\n\n raise Exception(\"Intent with name \" + intent_name + \" not supported\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"MySubredIntent\":\n return set_subred_in_session(intent, session)\n elif intent_name == \"ReadSubredIntent\":\n return get_subreddit_from_session(intent, session)\n elif intent_name == \"StopIntent\":\n return on_session_stopped(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session, state):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n \n \n # If new user, and intent is not setting up, prompt to set up first\n # If corrupted user, prompt to set up again\n userId = session[\"user\"][\"userId\"]\n query_user = get_info(userId)\n print(query_user)\n if (len(query_user) == 0 and intent_name != \"NewUserCollectInfoIntent\") or \\\n (len(query_user) > 0 and len(query_user[0].keys()) != NUM_DB_COLS):\n if len(query_user) > 0 and len(query_user[0].keys()) != NUM_DB_COLS:\n delete_info(userId)\n \n return new_user_intro(session, state)\n\n handlers = {\n \"GetMainFocusIntent\": get_main_focus_intent_response,\n \"CheckinKeepMainFocusIntent\": keep_main_focus_intent,\n \"CheckinReplaceMainFocusIntent\": replace_main_focus_intent,\n \"ExecuteMorningRoutineIntent\": execute_morning_routine_intent,\n \"ExecuteEveningRoutineIntent\": execute_evening_routine_intent,\n \"AMAZON.YesIntent\": handle_yes_intent,\n \"AMAZON.NoIntent\": handle_no_intent,\n \"AMAZON.CancelIntent\": handle_session_end_request,\n \"AMAZON.StopIntent\": handle_session_end_request,\n }\n \n # Handlers that need more arguments\n if intent_name not in handlers:\n if intent_name == \"SetMorningRoutineIntent\":\n return set_routine_intent(intent, session, state, 
MORNING)\n elif intent_name == \"SetEveningRoutineIntent\":\n return set_routine_intent(intent, session, state, EVENING)\n elif intent_name == \"GetMorningRoutineIntent\":\n return get_routine_intent(intent, session, state, MORNING)\n elif intent_name == \"GetEveningRoutineIntent\":\n return get_routine_intent(intent, session, state, EVENING)\n elif intent_name == \"NewUserCollectInfoIntent\":\n return new_user_collect_info_intent(intent_request, session, state)\n elif intent_name == \"SetNameIntent\":\n return set_name_intent(intent_request, session, state)\n \n try:\n return handlers[intent_name](intent, session, state)\n except Exception as e:\n # This exception probably came from inside a handler\n print(e)\n raise ValueError(\"Invalid intent: \"+intent_name)", "def on_intent(intent_request, session):\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n \r\n #print (intent)\r\n try :\r\n intent_name_value = intent['slots']['friend_name']['value']\r\n except :\r\n print(\"**** Can't find name\")\r\n\r\n try: \r\n intent_gender_value = intent['slots']['gender']['value']\r\n # print(\"****intent_gender_value: \" + intent_name_value)\r\n except :\r\n print(\"**** Can't find gender\")\r\n\r\n #friend_name = intent_value\r\n print(\"****session: \" + str(session))\r\n print(\"****Intent found is: \" + str(intent))\r\n print(\"****Intent Name found is: \" + str(intent_name))\r\n #print(\"****intent_gender_value found is: \" + str(intent_gender_value))\r\n # Dispatch to your skill's intent handlers\r\n if intent_name == \"welcomeIntent\" and (intent_gender_value == \"her\" or intent_gender_value == \"she\"):\r\n return say_hello_to_girl(intent_name_value)\r\n elif intent_name == \"welcomeIntent\" and (intent_gender_value == \"his\" or intent_gender_value == \"he\"):\r\n return say_hello_to_boy(intent_name_value) \r\n elif intent_name == \"jokeIntent\" :\r\n return joke_story(session)\r\n elif intent_name == \"foodIntent\" :\r\n return favorite_food(session)\r\n elif intent_name == \"secretIntent\" :\r\n return secret_story(session)\r\n elif intent_name == \"songIntent\" :\r\n return favorite_song(session)\r\n elif intent_name == \"quoteIntent\" :\r\n return favorite_quote(session)\r\n elif intent_name == \"gameIntent\" :\r\n return favorite_game(session)\r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request(session)\r\n elif intent_name == \"AMAZON.FallbackIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"<YOUR INTENT NAME HERE>\":\n # Update the wordsmith_data variable with your data. 
Use key, value\n # pairs where the key is the column name in Wordsmith and the value is\n # the value contained in that column\n wordsmith_data = { 'column1': 'value1', 'column2': 'value2' }\n narrative = wordsmith.generate(WORDSMITH_API_KEY, WORDSMITH_PROJECT_SLUG, WORDSMITH_TEMPLATE_SLUG, wordsmith_data)\n if 'errors' not in narrative:\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generated Response', narrative['data']['content'],\n '<REPROMPT TEXT HERE>', True))\n else:\n if not isinstance(narrative['errors'], list) :\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generation Error', 'Wordsmith reported the following error: {}'.format(narrative['errors']['detail']),\n '<REPROMPT TEXT HERE>', True))\n else:\n details = ', '.join([e['details'] for e in narrative['errors']])\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generation Error', 'Wordsmith reported the following error: {}'.format(details),\n '<REPROMPT TEXT HERE>', True))\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def post(self, request, data):\n hass = request.app['hass']\n\n intent_result = yield from _process(hass, data['text'])\n\n if intent_result is None:\n intent_result = intent.IntentResponse()\n intent_result.async_set_speech(\"Sorry, I didn't understand that\")\n\n return self.json(intent_result)", "async def post(self, request, data):\n hass = request.app[\"hass\"]\n\n try:\n intent_name = data[\"name\"]\n slots = {\n key: {\"value\": value} for key, value in data.get(\"data\", {}).items()\n }\n intent_result = await intent.async_handle(\n hass, DOMAIN, intent_name, slots, \"\", self.context(request)\n )\n except intent.IntentHandleError as err:\n intent_result = intent.IntentResponse()\n intent_result.async_set_speech(str(err))\n\n if intent_result is None:\n intent_result = intent.IntentResponse()\n intent_result.async_set_speech(\"Sorry, I couldn't handle that\")\n\n return self.json(intent_result)", "def respond_to_intent(self, intent):\n if type(intent) is BARTQueryIntent:\n return self.respond_to_bart_intent(intent)\n elif type(intent) is BusQueryIntent: \n return self.respond_to_bus_intent(intent)\n else:\n return HelpIntent()", "def intent(req, session):\n intent = req['intent']\n if session.setdefault('attributes', {}) is None:\n # Ensure that there's always a dictionary under \"attributes\".\n session['attributes'] = {}\n\n # If the user has already opened a dialog, handle incorrect\n # Intents from Alexa due to misunderstandings or user error.\n if session['attributes'].get('add_address') and \\\n not intent['name'] in ADD_ADDRESS_INTENTS:\n # Try to recover if Alexa misunderstood\n # an address as a station name.\n if intent['name'] == 'CheckStatusIntent' and \\\n intent['slots'].get('station_name', {}).get('value'):\n intent['name'] = 'AddAddressIntent'\n intent['slots'].setdefault('address_street', {})['value'] = \\\n intent['slots']['station_name']['value']\n else:\n return reply.build(\"I didn't understand that as an address. 
\"\n \"Please provide an address, such as \"\n \"\\\"123 north State Street\\\".\",\n reprompt=\"What's the street number and name?\",\n persist=session['attributes'],\n is_end=False)\n elif session['attributes'].get('remove_address') and \\\n not intent['name'] in REMOVE_ADDRESS_INTENTS:\n # If the user wanted to remove an address, but didn't\n # give an intelligible response when we requested\n # confirmation, then assume the answer is no.\n intent['name'] = 'AMAZON.NoIntent'\n\n # Dispatch each Intent to the correct handler.\n if intent['name'] == 'CheckBikeIntent':\n if not intent['slots']['bikes_or_docks'].get('value'):\n # If something went wrong understanding the bike/dock\n # value, fall back on the status check.\n return check_status(intent, session)\n else:\n return check_bikes(intent, session)\n elif intent['name'] == 'CheckStatusIntent':\n return check_status(intent, session)\n elif intent['name'] == 'ListStationIntent':\n return list_stations(intent, session)\n elif intent['name'] == 'CheckCommuteIntent':\n return check_commute(intent, session)\n elif intent['name'] == 'AddAddressIntent':\n return add_address(intent, session)\n elif intent['name'] == 'CheckAddressIntent':\n return check_address(intent, session)\n elif intent['name'] == 'RemoveAddressIntent':\n return remove_address(intent, session)\n elif intent['name'] == 'AMAZON.NextIntent':\n return next_intent(intent, session)\n elif intent['name'] == 'AMAZON.YesIntent':\n return yes_intent(intent, session)\n elif intent['name'] == 'AMAZON.NoIntent':\n return no_intent(intent, session)\n elif intent['name'] in ['AMAZON.StopIntent', 'AMAZON.CancelIntent']:\n return reply.build(\"Okay, exiting.\", is_end=True)\n elif intent['name'] == 'AMAZON.HelpIntent':\n return reply.build(\"You can ask me how many bikes or docks are \"\n \"at a specific station, or else just ask the \"\n \"status of a station. Use the %s station \"\n \"name, such as \\\"%s\\\". \"\n \"If you only remember one cross-street, you \"\n \"can ask me to list all stations on a particular \"\n \"street. If you've told me to \\\"add an address\\\", \"\n \"I can remember that and use it when you \"\n \"ask me to \\\"check my commute\\\". \"\n \"What should I do?\" %\n (config.network_name, config.sample_station),\n persist=session['attributes'],\n is_end=False)\n else:\n return reply.build(\"I didn't understand that. 
Try again?\",\n persist=session['attributes'],\n is_end=False)", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n return diningsuggestions_intent(intent_request)", "def lambda_handler(event, context):\r\n print(\"Incoming request...\")\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n # if (event['session']['application']['applicationId'] !=\r\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\r\n # raise ValueError(\"Invalid Application ID\")\r\n\r\n if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def process_intent(self, intent: Intent, game: Game):\n return intent", "def dispatch(intent_request):\n logger.debug(\n 'dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n logger.debug('HERE {}'.format(intent_request['currentIntent']['name']))\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'createCuration':\n print(\"In createCuration\", intent_name)\n return find_suggestion(intent_request)\n\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def lambda_handler(event, context):\n logger.info(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"Check that this is being called by our skill\"\"\"\n logger.info(\"Calling app: \"+str(event['session']['application']['applicationId']))\n if (event['session']['application']['applicationId'] !=\n \"amzn1.ask.skill.\"+skill_id):\n logger.error(\"Invalid application ID\")\n raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started(event, {'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event, event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event, event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event, event['request'], event['session'])\n\n # Otherwise deal with it gracefully\n logger.info(\"Unexpected request type:\")\n logger.info(json.dumps(event))\n return build_response({}, build_speechlet_response(\"Leeds Bins\", \"Welcome to Leeds Bins. Now you can find out which waste bins to take out when. 
Try asking: what's my next collection.\", None, False))", "def handler(event, context):\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n #if (event['session']['application']['applicationId'] != \"<APPLICATION_ID>\"):\n # raise ValueError(\"Invalid Application ID\")\n\n\n if event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"Incoming request...\")\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if (event['session']['application']['applicationId'] !=\n \"amzn1.ask.skill.2994421a-75ef-4502-9d4a-bf83f20a7ade\"):\n raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def test_dispatch_intent(self):\n @self.skill.intent('test_intent')\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n self.skill.response.sessionAttributes['run'] = True\n self.skill.request.request.type = 'IntentRequest'\n self.skill.request.request.intent = interface.Intent()\n self.skill.request.request.intent.name = 'test_intent'\n self.skill.dispatch()\n self.assertTrue(self.skill.response.sessionAttributes['run'])", "async def test_intent(self, dm):\n request = create_request(\"other\", \"intent\")\n result = await dm.apply_handler(request, create_responder(request))\n assert result.dialogue_state == \"intent\"", "async def handle_request(self, request: aioweb.request.Request):", "def handle_intent(intent_name):\n if intent_name in name_to_handler:\n return name_to_handler[intent_name]()\n else:\n return question_answer(intent_name)", "def process_request(self, request):\n self.req = request\n command = self.get_command()\n file_handler = filehandler.FileHandler(command)\n file_handler.handle_command()\n return command.result", "def lambda_handler(event, context):\n if DEBUG:\n print(\"event : {}\".format(json.dumps(event)))\n\n if event['session']['new']:\n if 
DEBUG:\n print(\"on_session_started requestId=\" + event['request']['requestId'] + \", sessionId=\" + event['session']['sessionId'])\n if event['request']['type'] == \"LaunchRequest\":\n if DEBUG:\n print(\"on_launch requestId=\" + event['request']['requestId'] + \", sessionId=\" + event['session']['sessionId'])\n return get_start_end_response(False)\n elif event['request']['type'] == \"IntentRequest\":\n if DEBUG:\n print(\"on_intent requestId=\" + event['request']['requestId'] + \", sessionId=\" + event['session']['sessionId'])\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n if DEBUG:\n print(\"on_session_ended requestId=\" + event['request']['requestId'] + \", sessionId=\" + event['session']['sessionId'])", "def lambda_handler(event, context):\r\n print(\"event.session.application.applicationId=\" +\r\n event['session']['application']['applicationId'])\r\n\r\n\r\n if event['session']['new']:\r\n #print (\"**** Reached\")\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n #print(\"**** Intent coming is : \" + event['request']['type'])\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def handle_request_from(self, user, request):\n request_type = request.request_type\n\n if request_type in self._plain_requests:\n ret = Response(\n request_type,\n data=self._plain_requests[request_type]()\n )\n elif request_type in self._user_requests and not user:\n ret = self._no_user_response(request_type)\n elif request_type in self._user_requests:\n ret = Response(\n request_type,\n data=self._user_requests[request_type](user)\n )\n else:\n ret = self._complex_requests[request_type](user, request.data)\n\n if ret.success:\n self._operation_count = \\\n (self._operation_count + 1) % self._save_frequency\n if self._operation_count == 0:\n self._users.commit()\n\n return ret", "def lambda_handler(event, context):\n logging.info(event)\n current_time = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n is_conversation_result = 'Details' in event\n if is_conversation_result:\n combine_bot_state_to_s3(event, current_time)\n else:\n save_bot_state_to_s3(event, current_time)\n\n # Generate response back to bot\n response = dict()\n if not is_conversation_result:\n response = {\n 'dialogAction': {\n 'type': 'Delegate',\n 'slots': event['currentIntent']['slots']\n }\n }\n logging.info(response)\n return response", "def lambda_handler(event, context):\n # By default, treat the user request as coming from the America/New_York time zone.\n \n os.environ['TZ'] = 'America/New_York'\n time.tzset()\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\n intent_name = event['currentIntent']['name']\n \n return dispatch(event)", "def handle(req):\n return logic(req)", "def async_handle(self, intent_obj):\n hass = intent_obj.hass\n slots = self.async_validate_slots(intent_obj.slots)\n name = slots['name']['value']\n entity = _match_entity(hass, name)\n\n if not entity:\n _LOGGER.error(\"Could not find entity id for %s\", name)\n return None\n\n yield from hass.services.async_call(\n core.DOMAIN, SERVICE_TURN_ON, {\n ATTR_ENTITY_ID: entity.entity_id,\n }, blocking=True)\n\n response = 
intent_obj.create_response()\n response.async_set_speech(\n 'Turned on {}'.format(entity.name))\n return response", "def __init__(self, intent=None):\n super(IntentRequest, self).__init__()\n default_attr = dict(intent=Intent())\n self.intent = intent\n self._set_default_attr(default_attr)", "def decide_action(self):\t\t\t\t\t#defining the function to decide the action\n recognizer, audio = self.speech.listen_for_audio()\t\t#listening for the audio\n\n # received audio data, now we'll recognize it using Google Speech Recognition\n speech = self.speech.google_speech_recognition(recognizer, audio)\t#storing the speech into variable as a text\n\n if speech is not None:\t\t#if speech is not recognized\n try:\n req = requests.get('https://api.wit.ai/message?v=20160918&q=%s' % speech,\n headers={\"Authorization\": wit_ai_token})\t\t#getting the wit.ait token and checking it\n print req.text\t\t\t#printing the text\n json_responce = json.loads(req.text)\t\t#printing the responce\n entities = None\t\t\t#inititaling the entities\n intent = None\t\t\t#initialising the intent\n if 'entities' in json_responce and 'Intent' in json_responce['entities']:\t#checking the the intents and entitites\n entities = json_responce['entities']\t\t#entities \n intent = json_responce['entities']['Intent'][0][\"value\"]\t#intents \n\n print intent\t#printing the intents\n if intent == 'greeting':\t#checking the intent type\n self.__text_action(self.nlg.greet()) #getting the function of the intent\n elif intent == 'snow white':\t\t#checking the intent type\n self.__text_action(self.nlg.snow_white())\t\t#getting the function of the intent\n elif intent == 'weather':\t\t#checking the intent type\n self.__weather_action(entities)\t#getting the function of the intent\n elif intent == 'news':\t\t\t#checking the intent type\n self.__news_action()\t#getting the function of the intent\n elif intent == 'maps':\t\t\t#getting the function of the intent\n self.__maps_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'holidays':\t\t#getting the function of the intent#checking the intent type\n self.__holidays_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'appearance':\t\t#getting the function of the intent#checking the intent type\n self.__appearance_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user status':\t\t#getting the function of the intent#checking the intent type\n self.__user_status_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user name':\t\t\t#getting the function of the intent#checking the intent type\n self.__user_name_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'personal status':\t\t#getting the function of the intent#checking the intent type\n self.__personal_status_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'joke':\t\t\t#getting the function of the intent#checking the intent type\n self.__joke_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'insult':\t\t#getting the function of the intent#checking the intent type\n self.__insult_action()\t#getting the function of the intent#checking the intent type\n return\t\t\t\t#retuning\n elif intent == 'appreciation':\t\t\t#getting the function of the intent#checking the intent type\n self.__appreciation_action()\t\t\t#getting the function of the 
intent#checking the intent type\n return\n elif intent == 'music':\t\t\t#getting the function of the intent#checking the intent type\n self.__music_action(music_file)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'navigation':\t\t\t#getting the function of the intent#checking the intent type\n self.__navigate_action()\n elif intent == 'tasks':\n self.__calender_events()\n\t\telif intent == 'guide':\n self.__guide()\n elif intent == 'web':\n self.__web()\n elif intent == 'video':\n self.__video()\n else: # No recognized intent\n self.__text_action(\"I'm sorry, I don't know about this yet.\")\n return\n\n except Exception as e:\n print \"Failed wit !\"\t\t\t#error message\n print(e)\t\t\t#printing the error\n traceback.print_exc()\n self.__text_action(\"I'm sorry, I couldn't understand what you mean !!\") #printing message\n return\t\t\t\t\n\n self.decide_action()", "def zari_webhook(request):\n request_json = request.get_json()\n print(request_json)\n if request_json[\"queryResult\"] != None:\n text = request_json[\"queryResult\"][\"queryText\"]\n intent = request_json[\"queryResult\"][\"intent\"][\"displayName\"]\n print(intent)\n print(intent == \"VerZapatos\")\n parameters = request_json[\"queryResult\"][\"parameters\"]\n \n if intent == \"VerZapatos\":\n return compose_response(ver_zapatos(parameters))\n \n return compose_response(f\"Hola mundo sin intent {text}\")\n\n #if request.args and 'message' in request.args:\n # return request.args.get('message')\n #elif request_json and 'message' in request_json:\n # return request_json['message']\n #else:\n # return f'Hello World!'", "def housepredict(intent_request):\r\n \r\n \r\n location_zip = get_slots(intent_request)[\"location\"]\r\n housetype_zip = get_slots(intent_request)[\"housetype\"]\r\n source = intent_request['invocationSource']\r\n \r\n print('received request: ' + str(intent_request))\r\n print (\"housetype\",housetype_zip)\r\n print (\"location1\",location_zip)\r\n\r\n if source == 'DialogCodeHook':\r\n # Perform basic validation on the supplied input slots.\r\n # Use the elicitSlot dialog action to re-prompt for the first violation detected.\r\n slots = get_slots(intent_request)\r\n print('slots are' ,str(slots)) \r\n validation_result = validate_housepred(location_zip)\r\n if not validation_result['isValid']:\r\n slots[validation_result['violatedSlot']] = None\r\n return elicit_slot(intent_request['sessionAttributes'],\r\n intent_request['currentIntent']['name'],\r\n slots,\r\n validation_result['violatedSlot'],\r\n validation_result['message'])\r\n\t\t\r\n validation_result2 = validate_housepred_hstyp(housetype_zip)\r\n if not validation_result2['isValid']:\r\n slots[validation_result2['violatedSlot']] = None\r\n return elicit_slot(intent_request['sessionAttributes'],\r\n intent_request['currentIntent']['name'],\r\n slots,\r\n validation_result2['violatedSlot'],\r\n validation_result2['message'])\r\n\r\n # Pass the price of the flowers back through session attributes to be used in various prompts defined\r\n # on the bot model.\r\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\r\n if location_zip is not None and housetype_zip is not None:\r\n output_session_attributes['Price'] = house_price_pred(location_zip,housetype_zip)#len(location_zip)*5#house_price_pred(location_zip,housetype_zip) \r\n #price = house_price_pred(location_zip,housetype_zip)# Elegant pricing model\r\n\t\t\t\r\n return 
delegate(output_session_attributes, get_slots(intent_request))\r\n\r\n # Order the flowers, and rely on the goodbye message of the bot to define the message to the end user.\r\n # In a real bot, this would likely involve a call to a backend service.\r\n print(intent_request['sessionAttributes']['Price']) \r\n return close(intent_request['sessionAttributes'],\r\n 'Fulfilled',\r\n {'contentType': 'PlainText',\r\n 'content': 'Approx. next year growth prediction for {hstyp} in {loc} is {prc}%'.format(hstyp=housetype_zip,loc=location_zip,prc=intent_request['sessionAttributes']['Price'])})", "async def _handle_request(self, request: web.Request) -> web.Response:\n event = await request.json()\n # This handler will be called on the server thread. Call the external\n # handler on the app thread.\n self._main_loop.call_soon_threadsafe(self.handle_event, event)\n return web.Response(text=\"OK\")", "def execute_request(self):\n print(\n self.LOG_CLASS,\n '[method: main]',\n 'MyCityDataModel received:\\n',\n str(self._mcd)\n )\n\n # TODO: This section should be generalized for all platforms if possible\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID \n to prevent someone else from configuring a skill that sends requests to \n this function.\n \"\"\"\n # if (mcd.application_id !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if self._mcd.is_new_session:\n self.on_session_started()\n\n if self._mcd.request_type == \"LaunchRequest\":\n return self.on_launch()\n elif self._mcd.request_type == \"IntentRequest\":\n return self.on_intent()\n elif self._mcd.request_type == \"SessionEndedRequest\":\n return self.on_session_ended()", "def lambda_handler(event, context):\n print('HANDLING EVENT')\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def process_request(self, request):\n raise NotImplementedError('process_request not implemented in BaseService')", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], 
event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])\n else:\n print (\"********************** Unknown Request\")", "def schedule_meeting(intent_request):\n \n meeting_person = intent_request['currentIntent']['slots']['Person']\n meeting_type = intent_request['currentIntent']['slots']['MeetingType']\n meeting_date = intent_request['currentIntent']['slots']['Date']\n meeting_time = intent_request['currentIntent']['slots']['Time']\n meeting_duration = intent_request['currentIntent']['slots']['Duration']\n meeting_address = intent_request['currentIntent']['slots']['Address']\n invitation_link = intent_request['currentIntent']['slots']['InvitationLink']\n phone_number = intent_request['currentIntent']['slots']['Phone']\n source = intent_request['invocationSource']\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\n booking_map = json.loads(try_ex(lambda: output_session_attributes['bookingMap']) or '{}')\n\n if source == 'DialogCodeHook':\n # Perform basic validation on the supplied input slots.\n slots = intent_request['currentIntent']['slots']\n validation_result = validate_schedule_meeting(meeting_duration, date, meeting_time)\n if not validation_result['isValid']:\n slots[validation_result['violatedSlot']] = None\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n validation_result['violatedSlot'],\n validation_result['message']\n )\n\n if not meeting_person:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Person',\n {'contentType': 'PlainText', 'content': 'Who is gonna be that with?'}\n )\n \n if meeting_person and not meeting_type:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'MeetingType',\n {'contentType': 'PlainText', 'content': 'What type of meeting would you like to schedule?'}\n )\n\n if meeting_person and meeting_type and not meeting_date:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Date',\n {'contentType': 'PlainText', 'content': 'When would you like to schedule your {} ?'.format(meeting_type)}\n )\n\n if meeting_type and meeting_date:\n # Fetch or generate the availabilities for the given date.\n booking_availabilities = try_ex(lambda: booking_map[meeting_date])\n if booking_availabilities is None:\n booking_availabilities = get_availabilities(meeting_date)\n booking_map[meeting_date] = booking_availabilities\n output_session_attributes['bookingMap'] = json.dumps(booking_map)\n\n meeting_type_availabilities = get_availabilities_for_duration(get_duration(meeting_type), booking_availabilities)\n if len(meeting_type_availabilities) == 0:\n # No availability on this day at all; ask for a new date and time.\n slots['Date'] = None\n slots['Time'] = None\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n 'Date',\n {'contentType': 'PlainText', 'content': 'There is not any availability on that date, is there another day which works for you?'}\n )\n\n message_content = 'What time on {} works for you? 
'.format(meeting_date)\n if meeting_time:\n output_session_attributes['formattedTime'] = build_time_output_string(meeting_time)\n # Validate that proposed time for the meeting can be booked by first fetching the availabilities for the given day. To\n # give consistent behavior in the sample, this is stored in sessionAttributes after the first lookup.\n if is_available(meeting_time, get_duration(meeting_type), booking_availabilities):\n return delegate(output_session_attributes, slots)\n message_content = 'The time you requested is not available. '\n\n if len(meeting_type_availabilities) == 1:\n # If there is only one availability on the given date, try to confirm it.\n slots['Time'] = meeting_type_availabilities[0]\n return confirm_intent(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n {\n 'contentType': 'PlainText',\n 'content': '{}{} is our only availability, does that work for you?'.format\n (message_content, build_time_output_string(meeting_type_availabilities[0]))\n },\n build_response_card(\n 'Confirm Meeting',\n 'Is {} on {} okay?'.format(build_time_output_string(meeting_type_availabilities[0]), date),\n [{'text': 'yes', 'value': 'yes'}, {'text': 'no', 'value': 'no'}]\n )\n )\n\n available_time_string = build_available_time_string(meeting_type_availabilities)\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n 'Time',\n {'contentType': 'PlainText', 'content': '{}{}'.format(message_content, available_time_string)},\n build_response_card(\n 'Specify Time',\n 'What time works best for you?',\n build_options('Time', meeting_type, meeting_date, booking_map)\n )\n )\n \n if meeting_type = 'online' and meeting_person and meeting_date and meeting_time and not invitation_link:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'InvitationLink',\n {'contentType': 'PlainText', 'content': 'Can you paste your invitation link in here, please?'}\n )\n \n if (meeting_type = 'personal' or meeting_type = 'inperson') and meeting_person and meeting_date and meeting_time and not meeting_address:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Address',\n {'contentType': 'PlainText', 'content': 'Where the {} will take place?', .format(meeting_type)}\n )\n \n if meeting_person and meeting_type and meeting_date and meeting_time and (invitation_link or meeting_address) and not contact_phone\"\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Phone',\n {'contentType': 'PlainText', 'content': 'Can you leave your contact phone number here, please?'}\n\n return delegate(output_session_attributes, slots)\n \n \n \"\"\" --- Check avalibility --- \"\"\"\n\n\n # Book the meeting.\n booking_availabilities = booking_map[meeting_date]\n if booking_availabilities:\n # Remove the availability slot for the given date as it has now been booked.\n booking_availabilities.remove(meeting_time)\n if meeting_duration == 60:\n second_half_hour_time = increment_time_by_thirty_mins(meeting_time)\n booking_availabilities.remove(second_half_hour_time)\n\n booking_map[date] = booking_availabilities\n output_session_attributes['bookingMap'] = json.dumps(booking_map)\n else:\n # This is not treated as an error as this code sample supports functionality either as fulfillment or 
dialog code hook.\n logger.debug('Availabilities for {} were null at fulfillment time. '\n 'This should have been initialized if this function was configured as the dialog code hook'.format(meeting_date))\n\n return close(\n output_session_attributes,\n 'Fulfilled',\n {\n 'contentType': 'PlainText',\n 'content': 'Okay, I have booked your meeting. See you at {} on {}'.format(build_time_output_string(meeting_time), meeting_date)\n }\n )", "def __call__(self, request):\n response = self.get_request(request)\n return response", "def process_request(self, fleet_request):\n ts = fleet_request.ts_req # starting time\n dt = int(fleet_request.sim_step.total_seconds()) #fleet_request.sim_step # Sim_step is how long a simulation time step\n # dt in timedelta format\n p_req = fleet_request.P_req\n q_req = fleet_request.Q_req\n\n # call run function with proper inputs\n resp = self.run(p_req, q_req, self.SOC, self.time, dt, ts) \n\n return resp", "def on_intent (intent_request, session):\n\n global session_attributes\n\n p_node = \"\"\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n try:\n print(\"session previous node name is\" + session_attributes['previous_node'])\n p_node = session_attributes['previous_node']\n\n except KeyError:\n print(\"No previous node yet\")\n\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n print (\"intent name is \" + str(intent_name) + \"p node is \"+str(p_node))\n\n # Dispatch to your skill's intent handlers\n if (intent_name == \"Q_two_Retirement\"):\n return know_retirement(intent, session)\n elif (intent_name == \"Q_three_existing_participants\" and p_node==\"know_retirement\"):\n return Account_401k(intent, session)\n elif (intent_name == \"Q_four_which_company\" and p_node==\"Account_401k\"):\n return Company_Name(intent, session)\n elif (intent_name == \"Q_five_employee_number\" and p_node==\"Company_Name\"):\n return Employee_number(intent, session)\n elif (intent_name == \"Q_six_secret_phrase\" and p_node==\"Employee_number\"):\n return Check_secret_phrase(intent, session)\n elif (intent_name == \"Q_seven_about_service\" and p_node==\"Check_secret_phrase\"):\n return More_about_service(intent, session)\n elif (intent_name == \"Q_eight_get_started\"):\n return Get_started(intent, session)\n elif (intent_name == \"investment_behavior\"):\n return Collect_investment_behavior(intent, session)\n elif (intent_name == \"Q_twleve_current_income_retirement\"):\n return Current_income_portion(intent, session)\n elif (intent_name == \"Q_thirteen_final_confirm\" and p_node==\"Current_income_portion\"):\n return Final_confirm(intent,session)\n elif intent_name == \"Case_two_voice_password\":\n return peers_compare(intent,session)\n elif intent_name == \"Case_two_new_allocation\":\n return send_chart_moderate(intent, session)\n elif intent_name == \"Case_two_aggressive\":\n return send_chart_aggressive(intent, session)\n elif intent_name == \"Case_two_much_better\":\n return much_better_allocation(intent, session)\n # elif intent_name == \"Force_with_you\":\n # return mayForceBeWithYou(intent, session)\n # elif intent_name == \"SendPicture\":\n # return sendpicture(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n elif p_node==\"know_retirement\":\n # in case an intent is not identified, but remember the 
previous step, repeat the previous intent\n return know_retirement(intent, session)\n elif p_node==\"Account_401k\":\n # in case an intent is not identified, but remember the previous step, repeat the previous intent\n return Account_401k(intent, session)\n elif p_node==\"Company_Name\":\n # in case an intent is not identified, but remember the previous step, repeat the previous intent\n return Company_Name(intent, session)\n elif p_node==\"Employee_number\":\n # in case an intent is not identified, but remember the previous step, repeat the previous intent\n return Employee_number(intent, session)\n elif p_node==\"Check_secret_phrase\":\n # in case an intent is not identified, but remember the previous step, repeat the previous intent\n return Check_secret_phrase(intent, session)\n elif p_node==\"Current_income_portion\":\n # in case an intent is not identified, but remember the previous step, repeat the previous intent\n return Current_income_portion(intent, session)\n else:\n raise ValueError(\"Invalid intent\")", "def handle(self) -> None:\r\n\r\n if self.data.get(\"message-id\") != None:\r\n if self.data[\"status\"] == \"error\":\r\n print(self.data[\"error\"])\r\n return\r\n else:\r\n requestData = self.obs.pendingResponses.pop(self.data[\"message-id\"])\r\n request = requestData[\"request-type\"]\r\n #Requests as of version 4.8.0\r\n\r\n #General\r\n if request == \"GetVersion\":\r\n pass\r\n\r\n elif request == \"GetAuthRequired\":\r\n if self.data[\"authRequired\"]:\r\n secret_string: str = self.obs.password + self.data[\"salt\"]\r\n secret_hash: sha256 = sha256(secret_string.encode(\"utf-8\"))\r\n secret: bytes = b64encode(secret_hash.digest())\r\n\r\n response_string: str = secret.decode(\"utf-8\") + self.data[\"challenge\"]\r\n response_hash: sha256 = sha256(response_string.encode(\"utf-8\"))\r\n response: bytes = b64encode(response_hash.digest())\r\n\r\n self.obs.requests.append({\r\n \"type\": \"Authenticate\",\r\n \"auth\": response.decode(\"utf-8\")})\r\n\r\n else:\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"Authenticate\":\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"SetHeartbeat\":\r\n #To be removed in 5.0.0\r\n pass\r\n\r\n elif request == \"SetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetStats\":\r\n pass\r\n\r\n elif request == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n elif request == \"GetVideoInfo\":\r\n pass\r\n\r\n elif request == \"OpenProjector\":\r\n pass\r\n\r\n elif request == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n pass\r\n\r\n #Media Control\r\n elif request == \"PlayPauseMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"RestartMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StopMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"NextMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"PreviousMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaDuration\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"SetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"ScrubMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaState\":\r\n #Unreleased\r\n pass\r\n\r\n #Sources\r\n\r\n elif request == \"GetMediaSourcesList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == 
\"GetSourcesList\":\r\n pass\r\n\r\n elif request == \"GetSourceTypesList\":\r\n pass\r\n\r\n elif request == \"GetVolume\":\r\n pass\r\n\r\n elif request == \"SetVolume\":\r\n pass\r\n\r\n elif request == \"GetMute\":\r\n pass\r\n\r\n elif request == \"SetMute\":\r\n pass\r\n\r\n elif request == \"ToggleMute\":\r\n pass\r\n\r\n elif request == \"GetAudioActive\":\r\n pass\r\n\r\n elif request == \"SetSourceName\":\r\n pass\r\n\r\n elif request == \"SetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSourceSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceSettings\":\r\n pass\r\n\r\n elif request == \"GetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"SetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"GetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"SetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"GetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"SetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"GetSpecialSources\":\r\n pass\r\n\r\n elif request == \"GetSourceFilters\":\r\n source = self.obs.getSource(requestData[\"sourceName\"])\r\n if source != None:\r\n for _filter in self.data[\"filters\"]:\r\n source.addFilter(_filter) #type: ignore\r\n\r\n elif request == \"GetSourceFilterInfo\":\r\n pass\r\n\r\n elif request == \"AddFilterToSource\":\r\n pass\r\n\r\n elif request == \"RemoveFilterFromSource\":\r\n pass\r\n\r\n elif request == \"ReorderSourceFilter\":\r\n pass\r\n\r\n elif request == \"MoveSourceFilter\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterVisibility\":\r\n pass\r\n \r\n elif request == \"GetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"SetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"TakeSourceScreenshot\":\r\n pass\r\n\r\n #Outpute\r\n elif request == \"ListOutputs\":\r\n pass\r\n\r\n elif request == \"GetOutputInfo\":\r\n pass\r\n\r\n elif request == \"StartOutput\":\r\n pass\r\n\r\n elif request == \"StopOutput\":\r\n pass\r\n\r\n #Profiles\r\n elif request == \"SetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"GetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"ListProfiles\":\r\n pass\r\n\r\n #Recording\r\n elif request == \"GetRecordingStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopRecording\":\r\n pass\r\n\r\n elif request == \"StartRecording\":\r\n pass\r\n\r\n elif request == \"StopRecording\":\r\n pass\r\n\r\n elif request == \"PauseRecording\":\r\n pass\r\n\r\n elif request == \"ResumeRecording\":\r\n pass\r\n\r\n elif request == \"SetRecordingFolder\":\r\n pass\r\n\r\n elif request == \"GetRecordingFolder\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif request == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StartReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"SaveReplayBuffer\":\r\n pass\r\n\r\n #Scene Collections\r\n elif request == \"SetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"GetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"ListSceneCollections\":\r\n pass\r\n\r\n #Scene Items\r\n elif request == \"GetSceneItemList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"SetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"ResetSceneItem\":\r\n 
pass\r\n\r\n elif request == \"SetSceneItemRender\":\r\n pass\r\n\r\n elif request == \"SetSceneItemPosition\":\r\n pass\r\n\r\n elif request == \"SetSceneItemTransform\":\r\n pass\r\n\r\n elif request == \"SetSceneItemCrop\":\r\n pass\r\n\r\n elif request == \"DeleteSceneItem\":\r\n pass\r\n\r\n elif request == \"AddSceneItem\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"DuplicateSceneItem\":\r\n pass\r\n\r\n #Scenes\r\n elif request == \"SetCurrentScene\":\r\n pass\r\n\r\n elif request == \"GetCurrentScene\":\r\n self.obs.setCurrentScene(self.data[\"name\"])\r\n\r\n elif request == \"GetSceneList\":\r\n for scene in self.data[\"scenes\"]:\r\n self.obs.addScene(scene)\r\n self.obs.setCurrentScene(self.data[\"current-scene\"])\r\n\r\n elif request == \"CreateScene\":\r\n pass\r\n\r\n elif request == \"ReorderSceneItems\":\r\n pass\r\n\r\n elif request == \"SetSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"RemoveSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"GetSceneTransitionOverride\":\r\n pass\r\n\r\n #Streaming\r\n elif request == \"GetStreamingStatus\":\r\n pass\r\n\r\n elif request == \"StartStopStreaming\":\r\n pass\r\n\r\n elif request == \"StartStreaming\":\r\n pass\r\n\r\n elif request == \"StopStreaming\":\r\n pass\r\n\r\n elif request == \"SetStreamSettings\":\r\n pass\r\n\r\n elif request == \"GetStreamSettings\":\r\n pass\r\n\r\n elif request == \"SaveStreamSettings\":\r\n pass\r\n\r\n elif request == \"SendCaptions\":\r\n pass\r\n\r\n #Studio Mode\r\n elif request == \"GetStudioModeStatus\":\r\n pass\r\n\r\n elif request == \"GetPreviewScene\":\r\n pass\r\n\r\n elif request == \"SetPreviewScene\":\r\n pass\r\n\r\n elif request == \"TransitionToProgram\":\r\n pass\r\n\r\n elif request == \"EnableStudioMode\":\r\n pass\r\n\r\n elif request == \"DisableStudioMode\":\r\n pass\r\n\r\n elif request == \"ToggleStudioMode\":\r\n pass\r\n\r\n #Transitions\r\n elif request == \"GetTransitionList\":\r\n pass\r\n\r\n elif request == \"GetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionPosition\":\r\n pass\r\n\r\n else:\r\n print(f\"Unhandled response of type {request} and data {self.data}.\")\r\n\r\n \r\n\r\n else:\r\n event: str = self.data[\"update-type\"]\r\n #Events as of 4.8.0\r\n\r\n #Scenes\r\n if event == \"SwitchScenes\":\r\n self.obs.setCurrentScene(self.data[\"scene-name\"])\r\n\r\n elif event == \"ScenesChanged\":\r\n #self.obs.purgeScenes()\r\n pass\r\n\r\n elif event == \"SceneCollectionChanged\":\r\n pass\r\n\r\n elif event == \"SceneCollectionListChanged\":\r\n pass\r\n\r\n #Transitions\r\n elif event == \"SwitchTransition\":\r\n pass\r\n\r\n elif event == \"TransitionListChanged\":\r\n pass\r\n\r\n elif event == \"TransitionDurationChanged\":\r\n pass\r\n\r\n elif event == \"TransitionBegin\":\r\n pass\r\n\r\n elif event == \"TransitionEnd\":\r\n pass\r\n\r\n elif event == \"TransitionVideoEnd\":\r\n pass\r\n\r\n #Profiles\r\n elif event == \"ProfileChanged\":\r\n pass\r\n\r\n elif event == \"ProfileListChanged\":\r\n pass\r\n\r\n #Streaming\r\n elif event == \"StreamStarting\":\r\n pass\r\n\r\n elif event == \"StreamStarted\":\r\n pass\r\n\r\n elif event == \"StreamStopping\":\r\n pass\r\n\r\n elif event == \"StreamStopped\":\r\n pass\r\n\r\n elif event == \"StreamStatus\":\r\n pass\r\n\r\n #Recording\r\n elif event == 
\"RecordingStarting\":\r\n pass\r\n\r\n elif event == \"RecordingStarted\":\r\n pass\r\n\r\n elif event == \"RecordingStopping\":\r\n pass\r\n\r\n elif event == \"RecordingStopped\":\r\n pass\r\n\r\n elif event == \"RecordingPaused\":\r\n pass\r\n\r\n elif event == \"RecordingResumed\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif event == \"ReplayStarting\":\r\n pass\r\n\r\n elif event == \"ReplayStarted\":\r\n pass\r\n\r\n elif event == \"ReplayStopping\":\r\n pass\r\n\r\n elif event == \"ReplayStopped\":\r\n pass\r\n\r\n #Other\r\n elif event == \"Exiting\":\r\n pass\r\n\r\n #General\r\n elif event == \"Heartbeat\":\r\n pass\r\n\r\n elif event == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n #Sources\r\n elif event == \"SourceCreated\":\r\n pass\r\n\r\n elif event == \"SourceDestroyed\":\r\n pass\r\n\r\n elif event == \"SourceVolumeChanged\":\r\n pass\r\n\r\n elif event == \"SourceMuteStateChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioDeactivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioActivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioSyncOffsetChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioMixersChanged\":\r\n pass\r\n\r\n elif event == \"SourceRenamed\":\r\n pass\r\n\r\n elif event == \"SourceFilterAdded\":\r\n pass\r\n\r\n elif event == \"SourceFilterRemoved\":\r\n pass\r\n\r\n elif event == \"SourceFilterVisibilityChanged\":\r\n source = self.obs.getSource(self.data[\"sourceName\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"filterName\"]) #type: ignore\r\n if _filter != None:\r\n _filter.setVisible(self.data[\"filterEnabled\"]) #type: ignore\r\n\r\n elif event == \"SourceFiltersReordered\":\r\n pass\r\n\r\n #Media\r\n elif event == \"MediaPlaying\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPaused\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaRestarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStopped\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaNext\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPrevious\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaEnded\":\r\n #Unreleased\r\n pass\r\n\r\n #Scene Items\r\n elif event == \"SceneItemOrderChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemAdded\":\r\n pass\r\n\r\n elif event == \"SceneItemRemoved\":\r\n pass\r\n\r\n elif event == \"SceneItemVisibilityChanged\":\r\n scene = self.obs.getScene(self.data[\"scene-name\"])\r\n if scene != None:\r\n source = scene.getSource(self.data[\"item-name\"]) #type: ignore\r\n if source != None:\r\n source.setVisible(self.data[\"item-visible\"]) #type: ignore\r\n \r\n\r\n elif event == \"SceneItemLockChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemTransformChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemSelected\":\r\n pass\r\n\r\n elif event == \"SceneItemDeselected\":\r\n pass\r\n\r\n #Studio Mode\r\n elif event == \"PreviewSceneChanged\":\r\n pass\r\n\r\n elif event == \"StudioModeSwitched\":\r\n pass\r\n\r\n #Unhandled Events\r\n else:\r\n print(\"Unhandled event with data: \" + str(self.data))", "def unhandled_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n intent_name = get_intent_name(handler_input)\n if intent_name == 'ChallengeBossIntent':\n speech_text = 'You need to be in the boss room to challenge the boss. '\n elif intent_name == 'EnterMazeIntent':\n speech_text = 'You already have a maze in progress. 
Would you like to resume the maze or discard the maze? '\n elif intent_name == 'ResumeMazeIntent' or intent_name == 'DiscardMazeIntent':\n speech_text = 'You are already in a maze or you don\\'t have a maze in progress. Say enter the maze or discard the maze. '\n elif intent_name == 'LocationIntent':\n speech_text = 'You need to be in a maze to locate yourself. Say enter the maze or resume the maze. '\n elif intent_name == 'MoveIntent':\n speech_text = 'You need to be in a maze to take a move. Say enter the maze or resume the maze. '\n else:\n speech_text = 'I am not sure what you are saying. '\n\n handler_input.response_builder.speak(\n speech_text).set_should_end_session(False)\n return handler_input.response_builder.response", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n league = brasileirao.get()\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'], league)\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def default_answer(intent_request):\n\n #product_type = get_slots(intent_request)[\"ProductStyle\"]\n #itemid = get_slots(intent_request)[\"ItemId\"]\n #itemid='5391020'\n\n return close(intent_request['sessionAttributes'],\n 'Fulfilled',\n {'contentType': 'PlainText',\n 'content': 'Sample Response from default answer Lambda function'})", "def exec_request(self, request: Request, expected_response: ExpectedResponse):\n url = self.get_url_for_endpoint(\n endpoint=request.endpoint,\n method=request.method,\n object_id=request.object_id,\n )\n url_params = request.url_params.copy()\n\n step_name = f\"Send {request.method.name} {url.replace(self._base_url, '')}\"\n if url_params:\n step_name += f\"?{urlencode(url_params)}\"\n with allure.step(step_name):\n response = request.method.function(\n url=url,\n params=url_params,\n json=request.data,\n 
headers=request.headers,\n )\n\n attach_request_log(response)\n\n status_code_should_be(\n response=response, status_code=expected_response.status_code\n )\n\n if expected_response.body is not None:\n body_should_be(response=response, expected_body=expected_response.body)\n\n return response", "def _event_handler(event_type, slack_event):\n\n team_id = slack_event[\"team_id\"]\n pyBot.find_team(team_id)\n\n if event_type == \"message\":\n sender_id = None\n\n if \"user\" in slack_event[\"event\"]:\n\n sender_id = slack_event[\"event\"][\"user\"]\n\n adapted_message = sr.adapt_message_to_wit(sender_id, slack_event[\"event\"][\"text\"].encode('utf-8'))\n message = wit.treatment(adapted_message, sender_id)\n channel = slack_event[\"event\"][\"channel\"]\n print \"SLACK DEBUG \\n\"\n print message\n pyBot.send_message(sender_id, channel, message)\n\n return HttpResponse(\"OK\", 200)\n\n # ============= Event Type Not Found! ============= #\n # If the event_type does not have a handler\n #message = \"You have not added an event handler for the %s\" % event_type\n # Return a helpful error message\n #channel = slack_event[\"event\"][\"channel\"]\n\n #if \"user\" in slack_event[\"event\"]:\n # pyBot.send_message(channel, message)\n return HttpResponse(\"OK\", 200)", "def _handle_request(self, info, desired=None):\r\n debug_print('%s request:' % info.name)\r\n\r\n editor = info.editor\r\n if ((not editor.is_python_like())\r\n or sourcecode.is_keyword(info.obj)\r\n or editor.in_comment_or_string()):\r\n desired = 'fallback'\r\n\r\n self.pending = (info, desired)\r\n if not self.busy:\r\n self._handle_pending()", "def console_request(self, evt, proto):\n if evt.kind == sugar.transport.ServerMsgFactory.TASK_RESPONSE:\n threads.deferToThread(self.on_broadcast_tasks, evt, proto)" ]
[ "0.70845103", "0.704562", "0.69918424", "0.6985099", "0.69391847", "0.69371176", "0.6915305", "0.68666846", "0.6809054", "0.67749316", "0.67594165", "0.6753595", "0.6742854", "0.67342067", "0.6727003", "0.67222506", "0.6720902", "0.67015886", "0.6687733", "0.6676576", "0.6642403", "0.6639832", "0.6629159", "0.6617987", "0.65805626", "0.6566841", "0.6561894", "0.65460145", "0.65402794", "0.65297014", "0.6525395", "0.6500224", "0.6494684", "0.64907086", "0.648674", "0.6477002", "0.64547384", "0.64434457", "0.643631", "0.6410342", "0.63841736", "0.636459", "0.6357527", "0.63515663", "0.6351082", "0.6348821", "0.632708", "0.63133764", "0.6303331", "0.6296283", "0.62617046", "0.6253097", "0.62299883", "0.6221095", "0.6139019", "0.6100341", "0.609005", "0.6039388", "0.6034774", "0.6033876", "0.60072577", "0.5964647", "0.59399366", "0.5929138", "0.5900974", "0.588164", "0.585086", "0.57889205", "0.5775976", "0.57554096", "0.57198685", "0.57076746", "0.5602635", "0.55784637", "0.5577808", "0.5555392", "0.55489415", "0.5541317", "0.5531717", "0.55310404", "0.55279744", "0.552738", "0.5517139", "0.5503854", "0.54917336", "0.54903436", "0.5476294", "0.54746634", "0.5467666", "0.5466394", "0.5456035", "0.54462314", "0.5431443", "0.54185116", "0.5415179", "0.539437", "0.5391458", "0.53843856", "0.5382239", "0.5371376" ]
0.57320946
70
Called when the user ends the session. Is not called when the skill returns should_end_session=true
def on_session_ended(self):
    print(
        self.LOG_CLASS,
        '[method: on_session_ended]',
        'MyCityDataModel received:',
        str(self._mcd)
    )
    return self._mcd  # add cleanup logic here
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_session_end_request():\n card_title = \"Session Ended\"\n speech_output = \"Thank you for trying Cuni Control. \" \\\n \"Have a nice day! \"\n \n # Setting this to true ends the session and exits the skill.\n should_end_session = True\n return build_response({}, build_speechlet_response(\n card_title, speech_output, None, should_end_session))", "async def end_session(self):\n\t\t...", "def on_session_ended(session_ended_request, session):\n print(\"END SESSION\")\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_finish(context):\n pass", "def on_session_ended(session_ended_request, session):", "def sessionEnded(self):\r\n if self.sessionStarted == True: \r\n self.sessionCompleted = True", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, 
session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # add cleanup logic here\r", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # add cleanup logic here\r", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # add cleanup logic here\r", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # add cleanup logic here\r", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # add cleanup logic here\r", "def on_session_ended():\n #print(\"on_session_ended\")", "def on_session_ended():\n #print(\"on_session_ended\")", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] + \", 
sessionId=\" + session['sessionId'])\n \n # Todo: add cleanup logic here", "def handle_session_end_request():\n speech_output = None\n response = response_builders.build_response(session_attributes,\n response_builders.build_speechlet_response(card_title,\n speech_output, reprompt_text, should_end_session))\n return response", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])", "def on_session_ended(event_request, session):\n print(\"=====on_session_ended requestId=\" + event_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n return play_end_message()", "def on_session_ended(event, session_ended_request, session):\n logger.info(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here?", "def on_session_closed(self):\n self.session = None", "def endSession(self):\n if(self.verb >= DLS_VERB_HIGH):\n print \"--Ending session with %s (no action)\" % (self.server)", "def handle_finish_session_request(intent, session):\n \n print(\"handle_finish_session_request\", intent)\n\n return response(speech_response=\"Danke fürs mitspielen!\", should_end_session=True,\n card_text=ABSCHIED_CARD_TEXT)", "def test_dispatch_session_end(self):\n @self.skill.session_ended\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n self.skill.response.sessionAttributes['run'] = True\n self.skill.request.request.type = 'SessionEndedRequest'\n self.skill.dispatch()\n self.assertTrue(self.skill.response.sessionAttributes['run'])", "def set_end_session(self, end):\n self.response.shouldEndSession = end", "def on_session_destroyed(session_context):\n if data.AUTO_SHUTDOWN:\n import sys\n\n sys.exit(\n \"\\033[1;31mThe session has ended - tab closed or timeout. \\n\\n --- Terminating the Forest progam and relinquishing control of port. 
---\\033[1;00m\"\n )", "def logout_callback(item):\n yctx.session_end()", "def session_end(self, user):\n self._transport.delete(\"/service/v3/sessions\", self._subject, username=user)", "def end_session(request):\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n del request.session[\"analytics\"]\n\n response = {\n \"redirect_url\": \"/main\"\n }\n\n return JsonResponse(response)", "def close_session(self):\n if self.sma_sid is None:\n return\n yield from self._fetch_json(URL_LOGOUT, {})\n self.sma_sid = None", "def terminate_session():\n token = oidc.user_loggedin and oidc.get_access_token()\n if token and oidc.validate_token(token):\n # Direct POST to Keycloak necessary to clear KC domain browser cookie\n logout_uri = oidc.client_secrets['userinfo_uri'].replace(\n 'userinfo', 'logout')\n data = {\n 'client_id': oidc.client_secrets['client_id'],\n 'client_secret': oidc.client_secrets['client_secret'],\n 'refresh_token': oidc.get_refresh_token()}\n requests.post(logout_uri, auth=BearerAuth(token), data=data)\n\n oidc.logout() # clears local cookie only", "def on_exit(session):\n session.close()", "def on_session_closed(self, session):\n if session.id in self.sessions:\n del self.sessions[session.id]", "async def async_close_session(self) -> None:\n if not self.token:\n return\n\n await self._async_ws_set_function(CMD_LOGOUT, {})\n self.token = None", "def __end_session(session):\n\n # reset agent and game recording info\n session['recording'] = False\n # gather time information and compile stats\n session['endTime'] = time.time()\n\n output_logs(session)\n\n stats = compile_stats(session)\n\n return stats", "def EndSession( self ):\r\n\r\n self._socket.write( 'X' ) \r\n # self._connection.write( 'X' ).flush() \r\n\r\n return self.GetServerResponse()", "def session_ended(self, f):\n self._session_ended_view_func = f\n\n return f", "def default_after_end_session_hook(\n request, id_token=None, post_logout_redirect_uri=None,\n state=None, client=None, next_page=None):\n return None", "def test_process_end(self):\n self.skill.logic = {}\n @self.skill.session_ended\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n pass\n actual = self.skill.process(data.SAMPLE_SESSION_ENDED_REQUEST)\n expected = '\"response\": {\"shouldEndSession\": true}'\n self.assertRegexpMatches(actual, expected)", "def stop(self):\n return self.rpc.call(MsfRpcMethod.SessionStop, [self.sid])", "def session_ended_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n\n speech_text = \"The session ended.\"\n handler_input.response_builder.speak(speech_text)\n\n logger.info(\n \"Session ended with reason: {}\".format(\n handler_input.request_envelope.request.reason))\n return handler_input.response_builder.response", "def end_session(self):\r\n self.web_driver.quit()\r\n self.write_log(\"Web driver ended.\")", "def endGame(self):\n pass", "def close_session(self, message):\n pass", "def end(self, send_logout_to_apis=False, request=None):\n self.ended_at = now()\n self.save()\n\n if send_logout_to_apis and request:\n from oidc_apis.backchannel_logout import send_backchannel_logout_to_apis_in_token_scope\n\n tokens = [se.content_object for se in self.get_elements_by_model(Token)]\n for token in filter(None, tokens):\n send_backchannel_logout_to_apis_in_token_scope(token, request, sid=str(self.id))", "def close(self):\n self.session.close(SessionCloseErrorCode.SESSION_DIED)", "def close(self):\n self.sess.close()\n print(\"Current session closed!\")", "def 
api_end_game(self):\n pass", "def rstrtmgr_RmEndSession(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwSessionHandle\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def end(self, activity, session):\n raise PermissionDenied(\"Cette activité est fermé.\")", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # add cleanup logic here\r\n global current_x\r\n global current_y\r\n global prev_x\r\n global prev_y\r\n current_x = 0\r\n current_y = 0\r\n prev_x = 0\r\n prev_y = 0", "def set_end_game(self):\n # For now, we just need to set a flag indicating we should end\n # the game. When we check whether we should load another story\n # or repeat a repeating script, this flag will be used to skip\n # back to the main session script, to the end of the game.\n self._end_game = True", "async def shutdown(self, ctx):\n await self.bot.session.close()\n await self.bot.logout()", "def shutdown_session(response):\n db_session.remove()\n return response", "def shutdown_session(response):\n db_session.remove()\n return response", "def session_shutdown(self, session):\n self.remove_session(session)", "def do_logout():\n del session[CURRENT_USER_KEY]", "def _handle_logout(self):\n self.food_service.log_out()\n self._handle_after_logout()", "def close_session_retrial(self, url, payload):\n self.login()\n response = self.__client.post(url=url, headers=self.__headers, payload=json.dumps(payload))\n if response.status_code == HTTP_200_OK:\n logger.warning('Session was closed')\n return\n return", "def end_session(self, session_id):\n params = {\n 'ident': session_id,\n 'sessionclosed': 1 # Requests a session to be closed\n }\n response = urllib2.urlopen(\n self.endpoint, urllib.urlencode(params)).read()\n\n return response == ''", "def on_timeout(self):\n self.logger.debug('id=%d, Session timed out!', self.id)\n self.close(SessionCloseErrorCode.SESSION_DIED)", "def close_session(self):\n self.sess.close()", "def on_cmd_close(self, session, _cmd_list):\n self.reply_text(session, \"closing this (%s) session\" % str(self))\n session.close()\n return False", "def handle_close(self):\n self.clear()\n LOGGER.debug(\"local session closed(%d)\", id(self))\n MOLO_CLIENT_APP.remote_session_dict.pop(id(self), None)\n remote_session = MOLO_CLIENT_APP.remote_session_dict.get(id(self))\n if remote_session:\n remote_session.handle_close()\n self.close()", "def bcp_game_end(self, **kwargs):\n self.player = None\n self.events.post('game_ended', **kwargs)", "def logout(self):\r\n self._api_entrypoint.logout(self._session_token)", "def sign_off(self):\n self.log(\"Bot player signing off.\")\n return self.complete_questionnaire()", "def end(self):\n winners = mafia.str_player_list(self.game.winners())\n logging.info(\"Game over! 
Winners: %s\" % winners)\n\n subject = \"%s: The End\" % self.name\n body = \"Game over!\\n\\nCongratulations to %s for a well \" \\\n \"(or poorly; I can't tell) played game!\" % winners\n self.send_message(mafia.events.PUBLIC, subject, body)", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def logout(self):\n self.session.disconnect()", "def teardown_session(e):\n my_db.close()\n OT_spider.close()", "def exitApplication(self):\n try:\n self.repository.saveSession()\n print(\"Session saved :).\")\n except IOError:\n print(\"Could not save session. Your work will be lost :(.\")\n\n print(\"Exiting.\")\n exit(0)", "def logout(self):\n spotify.Error.maybe_raise(lib.sp_session_logout(self._sp_session))", "def _teardown(response):\n user_state = '\"\"'\n if 'user.biv_id' in flask.session:\n user_state = 'l'\n if flask.session['user.is_logged_in']:\n user_state += 'i'\n else:\n user_state += 'o'\n user_state += '-' + str(flask.session['user.biv_id'])\n BetterLogger._user_state = user_state", "def shutdown(self):\n logging.info(\"Shutdown\")\n self._sessionmanager.Shutdown()", "async def end(self, roles, dialogs):\n self.ended = True", "def remove_session(self) -> None:\n pass" ]
[ "0.79537493", "0.7887866", "0.7854766", "0.77201885", "0.7516735", "0.75162214", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.7487292", "0.74659026", "0.74659026", "0.74659026", "0.74659026", "0.74659026", "0.7450483", "0.7450483", "0.7369505", "0.7358845", "0.73338777", "0.7310692", "0.7275433", "0.72433203", "0.7214541", "0.71746457", "0.71620417", "0.7149696", "0.71445835", "0.7058871", "0.6936367", "0.6910185", "0.6833786", "0.6793072", "0.6707502", "0.66947526", "0.65612453", "0.647875", "0.6459291", "0.64558613", "0.6408", "0.6404361", "0.63789", "0.63414603", "0.6337673", "0.6337453", "0.632962", "0.62954855", "0.62908864", "0.62766105", "0.6255179", "0.62454", "0.62370175", "0.62317675", "0.61823654", "0.61537004", "0.61280805", "0.6122734", "0.6122734", "0.6117025", "0.61047846", "0.608215", "0.60271853", "0.6011769", "0.6009348", "0.59707975", "0.59690297", "0.59619635", "0.59608305", "0.5944423", "0.5943274", "0.5935922", "0.59182674", "0.59182674", "0.59170884", "0.5911792", "0.59045297", "0.59029645", "0.5889641", "0.5883743", "0.5883576", "0.58738154" ]
0.0
-1
If we wanted to initialize the session to have some attributes we could add those here.
def get_welcome_response(self):
    print(
        self.LOG_CLASS,
        '[method: get_welcome_response]'
    )
    self._mcd.intent_name = "Welcome"
    self._mcd.output_speech = \
        "Welcome to the Boston Public Services skill. How can I help you? "
    # If the user either does not reply to the welcome message or says
    # something that is not understood, they will be prompted again with
    # this text.
    self._mcd.reprompt_text = \
        "For example, you can tell me your address by saying, " \
        "\"my address is\" followed by your address."
    self._mcd.should_end_session = False
    return self._mcd
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_session(self):\n pass", "def init_session(self):\n pass", "def __init__(self, session):\n self.session = session", "def build_session_attributes(session):\n if 'attributes' in session.keys():\n if session['attributes']:\n session_attributes = session['attributes']\n else:\n # called from test\n session_attributes = {}\n session_attributes['state'] = 'started'\n session_attributes['accepted_questions'] = []\n session_attributes['rejected_questions'] = []\n else:\n # called from tap\n session_attributes = {}\n session_attributes['state'] = 'started'\n session_attributes['accepted_questions'] = []\n session_attributes['rejected_questions'] = []\n\n return session_attributes", "def __init__(self, session):\n self._session = session", "def _create_session(self):\n self.session = requests.Session() # pragma: no cover\n self.session.headers[\"Accept\"] = \"application/json\" # pragma: no cover\n if self.user: # pragma: no cover\n self.session.auth = (self.user, self.cred) # pragma: no cover", "def __init__(self, session):\n self.sess = session", "def _set_session(self):\n self.__session = sessionmaker(bind=self.__engine)()", "def init(self, activity, session):\n return {}", "def _initialize_session(self):\n session = requests.Session()\n session.auth = (self.login, self.password)\n session.verify = False\n session.headers.update({'Accept': 'application/json'})\n session.headers.update({'Content-type': 'application/json'})\n return session", "def __init__(self, new=None, session_id=None, attributes=None,\n application=None, user=None):\n default_attr = dict(new=bool(),\n session_id=str(),\n attributes=dict(),\n application=Application(),\n user=User())\n self.new = new\n self.session_id = session_id\n self.attributes = attributes\n self.application = application\n self.user = user\n self._set_default_attr(default_attr)", "def __init__(self, session, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.__client_session = session", "def add_session_attr(typename, session):\n old_session = getattr(typename, 'session', None)\n setattr(typename, 'session', session)\n yield\n if old_session:\n setattr(typename, 'session', old_session)", "def __init__(self, session):\n if not (getattr(session, \"token\", None) and isinstance(session.token, dict)):\n raise exceptions.InvalidUsageError(\"Session object is not valid\")\n self._session = session", "def session(self):", "def __init__(self,sessionpath=None,sessionlifetime=None,config=None):\n if sessionlifetime is not None:\n self.sessionlifetime = sessionlifetime\n\n if sessionpath is None:\n self.sessionpath = Config.sessionpath\n else:\n self.sessionpath = sessionpath\n\n # legt Sessionverzeichnis an, wenn nicht vorhanden\n if not os.path.exists(self.sessionpath):\n os.makedirs(self.sessionpath)\n \n\n self.cookie = Cookie.SmartCookie()\n self.loadCookie()\n self.newSession()\n\n # Attribute Laden wenn Cookie 'sid' gesetzt\n if self.cookie is not None:\n self.loadAttributes()", "def on_session_start():\n session_attributes[NUMBER_LIST_KEY] = [LOWER + i for i in range(UPPER - LOWER + 1)]\n session_attributes[LAST_QUESTION_KEY] = NO_QUESTION\n session_attributes[LAST_EXTENSION_KEY] = ''\n session_attributes[NUM_QUESTIONS_KEY] = 0", "def init_session():\n\n session = Session()\n\n # headers\n session.headers = {\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"pt-PT,pt;q=0.8,en-GB;q=0.6,en;q=0.4,en-US;q=0.2\",\n \"User-Agent\": \"Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0\",\n 
\"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"text/javascript, application/javascript, */*\",\n \"Referer\": \"https://sigrhe.dgae.mec.pt/openerp/menu?active=474&tzoffset=-60\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Connection\": \"keep-alive\",\n \"DNT\": \"1\",\n \"Host\": \"sigrhe.dgae.mec.pt\",\n \"Origin\": \"https://sigrhe.dgae.mec.pt\",\n }\n\n return session", "def init_session(self):\n self._session = requests.Session()", "def __init__(self, version=None, sessionAttributes=None, response=None):\n default_attr = dict(version='1.0',\n sessionAttributes=dict(),\n response=Response())\n self.version = version\n self.sessionAttributes = sessionAttributes\n self.response = response\n self._set_default_attr(default_attr)", "def __init__(self, request):\n self.session = request.session\n cart = self.session.get('cart-session')\n if \"cart-session\" not in self.session:\n cart = self.session['cart-session'] = {}\n else:\n self.cart = cart", "def on_session_started(session_started_request, session):\n print(\"on_session_started requestId=\" + session_started_request['requestId'] + \", sessionId=\" + session['sessionId'])\n \n session['attributes'] = {\"currentQuestion\":0, \"score\":0, \"date\":datetime.datetime.now().strftime(\"%B-%d-%Y %I:%M%p\"), \"billNo\":\"\", \"age\":\"\", \"result\":[]}", "def _init_session(session):\n if session is None:\n session = requests.Session()\n return session", "def _init_attributes(self):\n if os.name == \"nt\":\n if \"64\" in platform.architecture()[0]:\n platform_arch = \"x86_64\"\n elif \"32\" in platform.architecture()[0]:\n platform_arch = \"i386\"\n else:\n platform_arch = platform.architecture()\n os_ver = f\"Windows-{platform.win32_ver()[1]}\"\n else:\n platform_arch = platform.machine()\n if platform.system() == \"Darwin\":\n os_ver = f\"macOS-{platform.mac_ver()[0]}\"\n else:\n os_ver = \"-\".join(linux_distribution()[0:2])\n\n license_chunks = LICENSE.split(\" \")\n if license_chunks[0] == \"GPLv2\":\n client_license = \"GPL-2.0\"\n else:\n client_license = \"Commercial\"\n\n default_attributes = {\n # Process id\n \"_pid\": str(os.getpid()),\n # Platform architecture\n \"_platform\": platform_arch,\n # OS version\n \"_os\": os_ver,\n # Hostname of the local machine\n \"_source_host\": socket.gethostname(),\n # Client's name\n \"_client_name\": \"mysql-connector-python\",\n # Client's version\n \"_client_version\": \".\".join([str(x) for x in VERSION[0:3]]),\n # Client's License identifier\n \"_client_license\": client_license,\n }\n self._settings[\"attributes\"].update(default_attributes)\n\n if \"connection-attributes\" in self._settings:\n for attr_name in self._settings[\"connection-attributes\"]:\n attr_value = self._settings[\"connection-attributes\"][attr_name]\n # Validate name type\n if not isinstance(attr_name, str):\n raise InterfaceError(\n f\"Attribute name '{attr_name}' must be a string type\"\n )\n # Validate attribute name limit 32 characters\n if len(attr_name) > 32:\n raise InterfaceError(\n f\"Attribute name '{attr_name}' exceeds 32 characters \"\n \"limit size\"\n )\n # Validate names in connection-attributes cannot start with \"_\"\n if attr_name.startswith(\"_\"):\n raise InterfaceError(\n \"Key names in 'session-connect-attributes' cannot \"\n f\"start with '_', found: {attr_name}\"\n )\n # Validate value type\n if not isinstance(attr_value, str):\n raise InterfaceError(\n f\"Attribute name '{attr_name}' value '{attr_value}' \"\n \" must be a string type\"\n )\n\n # Validate attribute 
value limit 1024 characters\n if len(attr_value) > 1024:\n raise InterfaceError(\n f\"Attribute name '{attr_name}' value: '{attr_value}' \"\n \"exceeds 1024 characters limit size\"\n )\n\n self._settings[\"attributes\"][attr_name] = attr_value", "def __init__(self, version=None, session=None, request=None):\n default_attr = dict(version=str(),\n session=Session(),\n request=Request())\n self.version = version\n self.session = session\n self.request = request\n self._set_default_attr(default_attr)", "def __init__(self, db_session):\n self.db_session = db_session", "def _new_session(self, username_key=None, **attributes):\n for key in ['username', 'token', 'tenant_id']:\n if attributes.get(key, None) is None:\n attributes[key] = key + \"_\" + text_type(uuid4())\n if 'expires' not in attributes:\n attributes['expires'] = (\n datetime.utcfromtimestamp(self._clock.seconds())\n + timedelta(days=1)\n )\n session = Session(**attributes)\n if username_key is None:\n username_key = session.username\n self._username_to_token[username_key] = session.token\n self._token_to_session[session.token] = session\n self._tenant_to_token[session.tenant_id] = session.token\n return session", "def __init__(self, url, session):\n self._url = url\n self._session = session", "def init_session_values():\n # Default date span = tomorrow to 1 week from now\n now = arrow.now('local') # We really should be using tz from browser\n tomorrow = now.replace(days=+1)\n nextweek = now.replace(days=+7)\n flask.session[\"begin_date\"] = tomorrow.floor('day').isoformat()\n flask.session[\"end_date\"] = nextweek.ceil('day').isoformat()\n flask.session[\"daterange\"] = \"{} - {}\".format(\n tomorrow.format(\"MM/DD/YYYY\"),\n nextweek.format(\"MM/DD/YYYY\"))\n # Default time span each day, 8 to 5\n flask.session[\"begin_time\"] = interpret_time(\"9am\")\n flask.session[\"end_time\"] = interpret_time(\"5pm\")\n flask.session[\"userTimezone\"] = \"America/Los_Angeles\"", "def __init__(self, **kwargs):\r\n self._kwargs = kwargs\r\n\r\n if 'uri' in self._kwargs:\r\n self.session = get_session(self._kwargs['uri'], mode='session')\r\n else:\r\n # open a database session\r\n self.session = get_session(uri=None, mode='session', **{k: v for k, v in self._kwargs.items() if k in ('db_name', 'data_path')})", "def __init__(self):\r\n # create a session id\r\n self.session = ViSession()", "def __init__(self):\n util.Database.Object.__init__(self, 'sessions')\n self.values = {\n 'time_zone': unix.GetTimeZone(),\n 'start_time': unix.GetTime(),\n 'ppid': unix.GetPPID(),\n 'pid': unix.GetPID(),\n 'tty': unix.GetTTY(),\n 'uid': unix.GetUID(),\n 'euid': unix.GetEUID(),\n 'logname': unix.GetLoginName(),\n 'hostname': unix.GetHostName(),\n 'host_ip': unix.GetHostIp(),\n 'shell': unix.GetShell(),\n 'sudo_user': unix.GetEnv('SUDO_USER'),\n 'sudo_uid': unix.GetEnv('SUDO_UID'),\n 'ssh_client': unix.GetEnv('SSH_CLIENT'),\n 'ssh_connection': unix.GetEnv('SSH_CONNECTION')\n }", "def __init__(self):\n\n self._session = requests.Session()", "def create_session(self):\n self._session = self.create_scoped_session()\n self.session = self._session()", "def test_set_session():", "def use_session(cls, session):\r\n cls._session = session", "def test_add_authenticated_session_var(self):\r\n req = Mock(authname='john', base_path='/', incookie=Cookie())\r\n session = Session(self.env, req)\r\n session['foo'] = 'bar'\r\n session.save()\r\n cursor = self.db.cursor()\r\n cursor.execute(\"SELECT value FROM session_attribute WHERE sid='john'\"\r\n \"AND name='foo'\") \r\n 
self.assertEqual('bar', cursor.fetchone()[0])", "def __init__(self, session):\n self.session = session\n self.dbi = DBInterface(self.session)", "def initsession():\n global stringSearch_component\n global year_component\n global semester_component\n\n sess = requests.Session()\n soup_jar = {'hello':'hello'}\n sess.headers = SESSION_HEADERS\n sess.cookies.update({'sap-usercontext': 'sap-client=700'})\n\n res_init = sess.get(SOURCE_URL)\n soup_jar['init'] = BeautifulSoup(res_init.text, 'html.parser')\n\n form = soup_jar['init'].find('form', \n {'name': 'sap.client.SsrClient.form'})\n action = form.get('action')\n res_base = sess.post(HOST_URL + action)\n\n soup_jar['base'] = BeautifulSoup(res_base.text, 'lxml')\n\n sapid = get_sap_wd_secure_id(soup_jar['base'])\n contextid = get_sap_contextid(soup_jar['base'])\n stringSearchClass_component = \\\n get_string_search_class_component(soup_jar['base'])\n stringSearch_component = \\\n get_string_search_component(soup_jar['base']) \n year_component = get_year_component(soup_jar['base'])\n semester_component = get_semester_component(soup_jar['base'])\n\n return sapid, contextid", "def init_session_values():\n # Default date span = tomorrow to 1 week from now\n now = arrow.now('local') # We really should be using tz from browser\n tomorrow = now.replace(days=+1)\n nextweek = now.replace(days=+7)\n flask.session[\"begin_date\"] = tomorrow.floor('day').isoformat()\n flask.session[\"end_date\"] = nextweek.ceil('day').isoformat()\n flask.session[\"daterange\"] = \"{} - {}\".format(\n tomorrow.format(\"MM/DD/YYYY\"),\n nextweek.format(\"MM/DD/YYYY\"))\n # Default time span each day, 8 to 5\n flask.session[\"begin_time\"] = interpret_time(\"9am\")\n flask.session[\"end_time\"] = interpret_time(\"5pm\")", "def test_add_anonymous_session_var(self):\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n req = Mock(authname='anonymous', base_path='/', incookie=incookie,\r\n outcookie=Cookie())\r\n session = Session(self.env, req)\r\n session['foo'] = 'bar'\r\n session.save()\r\n cursor = self.db.cursor()\r\n cursor.execute(\"SELECT value FROM session_attribute WHERE sid='123456'\")\r\n self.assertEqual('bar', cursor.fetchone()[0])", "def __init__(self, session: HttpSession, host: str) -> None:\n self.client = session\n self.host = host\n self.record_mode = True if hasattr(self.client, \"record_mode\") else False\n self.datatype_cache = DataTypeCache()\n self.user_agent = \"\"\n # Set to default as desktop request.\n self.set_user_agent_to_desktop()", "def __init__(self, config):\n self.config = config\n self.__session = None", "def __init__(self, req):\n #pass the request in making in so we can edit it later if requested (ACL for example)\n self.ip = req.connection.remote_ip\n c = Cookie.get_cookies(req)\n if not c.has_key('mps'):\n self.sessid = Uid().new_sid(req)\n else:\n c = c['mps']\n self.sessid = c.value\n \n #make new cookie so the cycle continues\n c = Cookie.Cookie('mps', self.sessid)\n c.path = '/'\n Cookie.add_cookie(req, c)\n \n self.session_path = \"%s%s\"%(path_to_sessions, self.sessid)\n self.full_session_path = \"%s%s\"%(self.session_path, db_extension)\n \n #use previous authenication until cookie is reevaluated, if they are officially logged in (in Instance)\n if os.path.exists(self.full_session_path):\n session = shelve.open(self.session_path, 'rw')\n self.user = session['USER_']\n session.close()\n else:\n self.user = self.unauthorized", "def on_session_started(session_started_request, session):\n \n 
#session.attributes['result_number'] = 1\n session['attributes'] = {}\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def __init__(self, session=None, key=None, salt=b\"\", variables_to_sign=None):\n super().__init__() # Yes, I know that this currently doesn't do anything.\n self.session = session\n self.key = key or Session.SECRET\n self.salt = salt\n self.variables_to_sign = variables_to_sign or []\n assert \"_signature\" not in self.variables_to_sign", "def __post_init__(self):\n self._session = Session()\n self._post_hooks()", "def __init__(self, session_id, state, media_key, position):\n self.session_id = session_id\n self.state = state\n self.media_key = media_key\n self.position = position\n self.timestamp = datetime.now()", "def test_modify_authenticated_session_var(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('john', 1, 0)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('john', 1, 'foo', 'bar')\")\r\n\r\n req = Mock(authname='john', base_path='/', incookie=Cookie())\r\n session = Session(self.env, req)\r\n self.assertEqual('bar', session['foo'])\r\n session['foo'] = 'baz'\r\n session.save()\r\n cursor.execute(\"SELECT value FROM session_attribute \"\r\n \"WHERE sid='john' AND name='foo'\") \r\n self.assertEqual('baz', cursor.fetchone()[0])", "def load_session(session):\n def inner():\n web.ctx.session = session\n return inner", "def __init__(self, request):\n # storage of current session making it accessible to other method of cart class\n self.session = request.session\n\n # getting cart from current session using self\n cart = self.session.get(settings.CART_SESSION_ID)\n\n if not cart:\n # saving an empty cart in the session if no cart is present. 
Product id used as keys in dictionary and quantity and price as value for each key to guarantee a product is not addded more than once in the cart\n cart = self.session[settings.CART_SESSION_ID] = {}\n self.cart = cart", "def test_modify_anonymous_session_var(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('123456', 0, 0)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('123456', 0, 'foo', 'bar')\")\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n req = Mock(authname='anonymous', base_path='/', incookie=incookie,\r\n outcookie=Cookie())\r\n session = Session(self.env, req)\r\n self.assertEqual('bar', session['foo'])\r\n session['foo'] = 'baz'\r\n session.save()\r\n cursor.execute(\"SELECT value FROM session_attribute WHERE sid='123456'\")\r\n self.assertEqual('baz', cursor.fetchone()[0])", "def init_attrs(self):\n raise NotImplementedError", "def set_session(session):\n\n global session_\n session_ = session\n import observatory.api.server.api as api\n\n api.session_ = session", "def setup_session():\n print(\"Setting up session\")\n engine = setup_engine()\n Base.metadata.bin = engine\n\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n\n return session", "def __init__(self, session, name):\n\n self.name = name\n self.session = session\n self.vectors = VectorsList(session, name)\n\n # init session db for current session\n if name not in self.session:\n self.session[self.name] = {\n 'stored_args': {},\n 'results': {},\n 'status': Status.IDLE\n }\n\n self.init()", "def testSessionCreate(self):\n success = False\n attr = None\n\n try:\n attr = self.session.create_visit_attr()\n\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.failIf(attr is None)", "def initialize(self, session,\n prepare=lambda: None, finalize=lambda: None):\n self.session = session\n self.prepare = prepare\n self.finalize = finalize\n self._set_session_callbacks()", "def on_session_started(session_started_request, session):", "def __init__(self, session):\n super(FlattrApi, self).__init__(session)\n self.things = ThingApi(session)\n self.users = UsersApi(session)\n self.authenticated = AuthenticatedApi(session)", "def __init__(self, session, request_authority, route_url):\n self._session = session\n self._route_url = route_url\n self.request_authority = request_authority", "def create_session(self, loop):\n session = ClientSession(loop=loop, json_serialize=json_dumps)\n # Setting directly on `session` will raise deprecation warning\n object.__setattr__(session, \"_request\", self.match_request)\n return session", "def setup (cls, **kwargs):\n\n cherrypy.log (\"Using PostgresSession\",\n context = 'SESSION', severity = logging.INFO)\n\n for k, v in kwargs.items ():\n setattr (cls, k, v)", "def _create_session(self) -> Session:\n session = Session()\n\n # Sets the client side and server side SSL cert verification, if provided as properties.\n if ssl_config := self.properties.get(SSL):\n if ssl_ca_bundle := ssl_config.get(CA_BUNDLE): # type: ignore\n session.verify = ssl_ca_bundle\n if ssl_client := ssl_config.get(CLIENT): # type: ignore\n if all(k in ssl_client for k in (CERT, KEY)):\n session.cert = (ssl_client[CERT], ssl_client[KEY])\n elif ssl_client_cert := ssl_client.get(CERT):\n session.cert = ssl_client_cert\n\n # If we have credentials, but not a token, we want to fetch a token\n if TOKEN not in self.properties and CREDENTIAL in self.properties:\n self.properties[TOKEN] = 
self._fetch_access_token(session, self.properties[CREDENTIAL])\n\n # Set Auth token for subsequent calls in the session\n if token := self.properties.get(TOKEN):\n session.headers[AUTHORIZATION_HEADER] = f\"{BEARER_PREFIX} {token}\"\n\n # Set HTTP headers\n session.headers[\"Content-type\"] = \"application/json\"\n session.headers[\"X-Client-Version\"] = ICEBERG_REST_SPEC_VERSION\n session.headers[\"User-Agent\"] = f\"PyIceberg/{__version__}\"\n\n # Configure SigV4 Request Signing\n if str(self.properties.get(SIGV4, False)).lower() == \"true\":\n self._init_sigv4(session)\n\n return session", "def create(self, class_name, attrs, session):", "def __init__(self):\n engine = db_connect()\n self.Session = sessionmaker(bind=engine)", "def __init__(self):\n engine = create_engine(\"postgresql://postgres:1@localhost:5432/postgres\")\n session_class = sessionmaker(bind=engine)\n self.session = session_class()", "def __init__(self, session, object_factory, request_validator):\n check_type(session, RestSession)\n\n super(NetworkSettings, self).__init__()\n\n self._session = session\n self._object_factory = object_factory\n self._request_validator = request_validator", "def __init__(self, *args, **kwargs):\n self.session = requests.Session()\n access_token = get_process_execution_user_token()\n self.session.headers[\"authorization\"] = f\"Bearer {access_token}\"\n self.session.headers[\"content-type\"] = \"application/json\"", "def addsession(cls, session, username, passwd):\n sessionkey = cls.sessionkey(session)\n tmpdict = dict({'username': username, 'password': passwd})\n sessionmgr.update(dict({sessionkey: tmpdict}))", "def create_session_dict(self, cand_id, visit_label):\n self.session_info_dict = self.session_db_obj.create_session_dict(cand_id, visit_label)\n if self.session_info_dict:\n self.cand_id = self.session_info_dict['CandID']\n self.visit_label = self.session_info_dict['Visit_label']\n self.center_id = self.session_info_dict['CenterID']\n self.project_id = self.session_info_dict['ProjectID']\n self.cohort_id = self.session_info_dict['CohortID']\n self.session_id = self.session_info_dict['ID']", "def __init__(self, session, object_factory, request_validator):\n check_type(session, RestSession)\n\n super(FabricWireless, self).__init__()\n\n self._session = session\n self._object_factory = object_factory\n self._request_validator = request_validator", "def __init__(self, server=None, auth=None, options=None):\n options = {} if options is None else options\n\n if server is not None:\n options['server'] = server\n\n merged = copy.deepcopy(self.DEFAULT_OPTIONS)\n merged.update(options)\n\n self._session = Session(auth, merged)", "def configure(self, session):\n\n raise NotImplementedError", "def setup(self):\n self.session = requests.session()\n self.session.headers.update({'Authorization': 'token %s' %\n self.access_token,\n 'Content-Type': 'application/json'})\n self.base_url = self.base_url_parts", "def init_session(self):\n ssl_context = ssl.create_default_context(\n purpose=ssl.Purpose.SERVER_AUTH, cafile=None, capath=None,\n cadata=None)\n ssl_settings = {\"ssl_context\": ssl_context}\n self.session = iRODSSession(\n host=self.module.params[\"host\"],\n port=self.module.params[\"port\"],\n user=self.module.params[\"admin_user\"],\n password=self.module.params[\"admin_password\"],\n zone=self.module.params[\"zone\"],\n **ssl_settings)", "def __init__(self, attrs = None):\n\n if attrs != None:\n self.__dict__.update(attrs)", "def test_ctor_no_cookie(self):\n request = 
self._make_request()\n session = self._makeOne(request)\n session_dict = session.managed_dict\n self.assertDictEqual(session_dict, {})\n self.assertIs(session.new, True)", "async def prepare(self):\n\n # Read the secure cookie which exists if we are in an authenticated\n # context (though not if the caimira webservice is running standalone).\n session = json.loads(self.get_secure_cookie('session') or 'null')\n\n if session:\n self.current_user = AuthenticatedUser(\n username=session['username'],\n email=session['email'],\n fullname=session['fullname'],\n )\n else:\n self.current_user = AnonymousUser()", "def on_start(self, session):\n pass", "def __init__(self, *kwargs):\n self.session = requests.Session()\n self.config_path = os.path.join(\n os.path.dirname(__file__), 'config.json')\n self.load_config()\n if self.application_token == '':\n self.set_application_token()\n self.token = self.get_token()\n self.get_settings()", "def init_user_session(request, user, remember=True):\n from appengine_utilities.sessions import Session\n lang = request.session['LANGUAGE_CODE']\n request.session = Session(set_cookie_expires=remember)#register the user with session\n request.session['LANGUAGE_CODE'] = lang#saved language\n user._session = request.session.get_ds_entity()\n from datetime import datetime\n user.last_login = datetime.now()\n if not user.profile: \n from georemindme.models import UserProfile\n p = UserProfile(user=user)\n p.put()\n user.put()\n request.session['user'] = user", "def __init__( self, **kwargs ):\n self.__dict__.update( kwargs )", "def _init_attributes(self):\n self.attr = {\n 'name': None,\n 'tags': [],\n 'openHours': None,\n 'type': None,\n 'parent': None,\n 'locationId': None,\n 'bannerAbbreviation': None,\n 'arcGisAbbreviation': None,\n 'geoLocation': None,\n 'geometry': None,\n 'summary': None,\n 'description': None,\n 'descriptionHtml': None,\n 'address': None,\n 'city': None,\n 'state': None,\n 'zip': None,\n 'county': None,\n 'telephone': None,\n 'fax': None,\n 'thumbnails': [],\n 'images': [],\n 'departments': [],\n 'website': None,\n 'sqft': None,\n 'calendar': None,\n 'campus': None,\n 'girCount': None,\n 'girLimit': False,\n 'girLocations': None,\n 'synonyms': [],\n 'bldgId': None,\n 'parkingZoneGroup': None,\n 'propId': None,\n 'adaParkingSpaceCount': None,\n 'motorcycleParkingSpaceCount': None,\n 'evParkingSpaceCount': None,\n 'weeklyMenu': None,\n 'notes': None,\n 'labels': {},\n 'steward': None,\n 'shape': {}\n }", "def test_new_session_promotion(self):\r\n cursor = self.db.cursor()\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session.save()\r\n\r\n cursor.execute(\"SELECT sid,authenticated FROM session\")\r\n self.assertEqual(('john', 1), cursor.fetchone())\r\n self.assertEqual(None, cursor.fetchone())", "def __init__(self, session, object_factory):\n check_type(session, RestSession)\n\n super(RoomsAPI, self).__init__()\n\n self._session = session\n self._object_factory = object_factory", "def __init__(self, url, username, password):\n self.session = requests.session()\n self.session.auth = (username, password)\n self.session.headers.update({\n 'Accept': JSON_CONTENT_TYPE,\n })\n self.url = url", "def _init_session(self):\n self.sess = tf.Session(graph=self.g)\n self.sess.run(self.init)", "def __init__(self):\n\n self.session = 
requests.session()\n self.current_user_agent_index = 0\n self.headers = {\n 'Host': 'www.amazon.com',\n 'User-Agent': _USER_AGENT_LIST[0],\n 'Accept': 'text/html,application/xhtml+xml,\\\n application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n }\n self.product_dict_list = []", "def on_session_started(session_started_request, session):\r\n # Add additional code here as needed\r\n pass", "def on_session_started(session_started_request, session):\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n # any initialization logic goes here", "def __init__(self):\n self._email: str | None = None\n self._session = VorwerkSession()", "def _init_session(self):\n self.sess = tf.Session(config=self.config, graph=self.g)\n self.sess.run(self.init)", "def _init_node_attributes(self):\n assert False", "def req_session():\n request = Request()\n session = PoorSession(request.secret_key)\n session.data['test'] = True\n session.write()\n request.cookies = session.cookie\n return request", "def on_session_started(session_started_request, session):\n # Add additional code here as needed\n pass", "def __init__(self, session, assoc_type):\n super(AssociateRequest, self).__init__()\n self.session = session\n self.assoc_type = assoc_type\n self.namespace = OPENID2_NS", "def initialize(self, *a, **kw):\n\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.read_secure_cookie('user_id')\n self.user = uid and User.by_id(int(uid))", "def init():\n get_writer_session()", "def _initialize_session(self):\n config = tf.ConfigProto()\n # restrict model GPU memory utilization to min required\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=config)\n tf_ver = int(tf.__version__.split('.')[1])\n if TF_VERSION <= 0.10:\n self.sess.run(tf.initialize_all_variables())\n logswriter = tf.train.SummaryWriter\n else:\n self.sess.run(tf.global_variables_initializer())\n logswriter = tf.summary.FileWriter\n self.saver = tf.train.Saver()\n self.summary_writer = logswriter(self.logs_path, graph=self.sess.graph) # change by ccx, add the graph_def", "def session_preparation(self):\n self._test_channel_read()\n self.set_base_prompt()\n self.disable_paging(command=\"session paginate disable\")\n self.set_terminal_width(command='terminal width 511')" ]
[ "0.74988097", "0.74988097", "0.73190755", "0.71198505", "0.70534444", "0.69491816", "0.68204165", "0.6755643", "0.6719715", "0.6653046", "0.6649403", "0.6623757", "0.65735793", "0.655451", "0.6542385", "0.65337366", "0.6529596", "0.64412206", "0.6429274", "0.6427235", "0.63800746", "0.6346776", "0.6340468", "0.63396734", "0.63344526", "0.6285446", "0.62772936", "0.62317926", "0.62282956", "0.62268895", "0.62266356", "0.62008125", "0.6196198", "0.61930376", "0.6181639", "0.6167317", "0.61556506", "0.6133275", "0.61074746", "0.610724", "0.6078202", "0.6059292", "0.60534483", "0.60524005", "0.60494137", "0.60463893", "0.6019515", "0.6012054", "0.6006532", "0.60044676", "0.60039496", "0.5993174", "0.59910256", "0.5989555", "0.5986645", "0.5978356", "0.5977413", "0.5976449", "0.5974016", "0.59710854", "0.59633076", "0.5953815", "0.59381354", "0.5927385", "0.59254134", "0.5918273", "0.59017366", "0.5898055", "0.58977574", "0.58887696", "0.58675575", "0.58672935", "0.58644426", "0.5854298", "0.5846533", "0.5843355", "0.5841617", "0.5838275", "0.583625", "0.5835535", "0.58128303", "0.5806365", "0.5803099", "0.5802917", "0.5786213", "0.5785067", "0.57787025", "0.5772873", "0.5765699", "0.5764503", "0.5761085", "0.5759423", "0.5741776", "0.57356733", "0.57326776", "0.5724419", "0.5723101", "0.5717587", "0.57089955", "0.5703379", "0.5696422" ]
0.0
-1
Gets the info of this atom. Returns
def __repr__(self): s = '{\n' s += 'symbol: \'' + self.symbol + '\',\n' s += 'label: \'' + self.label + '\',\n' s += 'coords: ' + str(self.coords) + ',\n' s += 'mass: ' + str(self.m) + ',\n' s += 'radius: ' + str(self.radius) + '\n' s += '}\n' return s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_atom_info(self):\n return", "def getInfo(self):\n return self._info", "def getInfo(self):\n return self.info", "def info(self):\n return self._info", "def info(self):\n return self._info", "def get_info(self) -> str:\n return self.info", "def get_atom_infos(self):\n\n yield from self._atom_infos", "def info(self):\r\n return self._get('info', {})", "def get_info(self):\n pass", "def get_info(self):\n pass", "def getInfo():", "def info(self):\n return self.client.call('GET', self.name + 'info')", "def return_info(self):\n\t\treturn self.info", "def info(self):\n if not self._was_read:\n self.read()\n return self._info", "def get_atom_infos(self):\n\n yield from self._molecule_state.get_atom_infos()", "def info(self) -> str:\n return self._info", "def info(self) -> str:\n return self._info", "def info(self):\n return self.info_text", "def info(self) -> Info:\n raw = self._call('GET', 'info')\n return Info.parse_raw(raw)", "def detail(self):\n info = self.info()\n return info", "def get_info(self):\n return None", "def get(self) -> Info:\n return InfoService.get()", "def get_info(self):\n return \"TODO !\"", "def get_info(self, name):\n return self.info[name]", "def get_info(self):\n raise NotImplementedError(\"Robot.get_info\")", "def get_info(self) -> str:\n raise NotImplementedError()", "def info(self) -> str:\n return pulumi.get(self, \"info\")", "def info(self):\n return requests.get(self.info_url + self.pid).json()", "def get_info(atom):\n return [atom.GetIdx(), atom.GetNeighbors()[0].GetIdx()]", "def info(self):\n path = self._get_path('info')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return response", "def info(self):\n return self.nfo", "def info(self):\n return (self._title, self._version, self._descr)", "def get_info(self, info):\r\n pass", "def mychem_info(self):\n return self._mychem_info", "def info(self):\n return self._fetch_json('/api/info')", "def info(self) -> dict:", "def info(self):\n return self.current_run.info", "def info(self):\n return self.__dict__[self.sid]", "def info(self):", "def info(self):", "def get_display_info(self):\n return self.display_info", "def full_info(self, object, name, value):\n return self.info()", "def get_info(self):\n self.exists = self.check_subscr()\n return self.attrs", "def info_from_entry(self, entry):\n info = super().info_from_entry(entry)\n return info", "def info(self):\n pp = pprint.PrettyPrinter(indent=4)\n print_text_box('Info')\n pp.pprint(self.manager.data[\"info\"])\n print('')", "def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))", "def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))", "def _get_information(self):\n pass", "def infos(self):\n return self._infos", "def info(self):\n self._info()", "def info(self) -> Optional[Dict[str, Any]]:\n return self._state.get(\"info\", None)", "def info(self):\n resp = requests.get(\"%s/api/info\"%self.urlbase, verify=False)\n return resp.json", "def get_residue_info(self):\n return", "def info(self):\n return InfoManager(session=self._session)", "def get_mol_info(self):\n return", "def GetInfo(self):\n import yfinance as yf\n return yf.Ticker(self._symbol).get_info()", "def get_info(self):\n out = ''\n for k in sorted(self.components.keys()):\n out += '{:s}: 
{:s}'.format(k, self.info[k]) + '\\n'\n return(out)", "def info(self):\r\n\r\n return self.sim_info", "def get_info(self):\n return {}", "def info(self):\n print self.id, self.type, self.xyz.get_xyz", "def get_info(self) -> Optional[Dict[str, Any]]:", "def extended_info(self):\n return self.client.call('GET', self.name + 'extended-info')", "def get_info(self):\n\t\tret = 'Flash info\\n'\n\t\tret += '\\tGPNVM bits: ' + str(self.read_gpnvm()) + '\\n'\n\t\tret += '\\tUnique identifier area: ' + self.read_unique_identifier_area().decode('ascii', 'replace') + '\\n'\n\t\tret += '\\tDescriptor: ' + str(self.read_descriptor()) + '\\n'\n\t\treturn ret", "def contact_info(self):\n return self._contact_info", "def info() -> None:", "def readMetaInfo(self):\n\t\tdata = self._fileSystem.readMetaInfo()\n\t\treturn data", "def get_info(self) -> str:\n info = ffi.new(\"char **\")\n ret = lib.Fapi_GetInfo(self._ctx, info)\n _chkrc(ret)\n return ffi.string(_get_dptr(info, lib.Fapi_Free)).decode(self.encoding)", "def info(self):\n return (self.kind, self.value)", "def info(self):\n data = await self.get_data(INFO)\n return data['motion']", "def info(self):\n info = []\n # meta data\n meta = self.meta\n for key in meta:\n info.append((key, self.meta[key]))\n # background correction\n info += self._fl.info\n return info", "def info(self) -> _InfoType:\n return {}", "def info(self):\n return {}", "def get_info(self, key: str) -> TaskInfo:\n return self.task_graph.nodes[key][\"info\"]", "def get_account_info(self):\n resp = requests.get(\n self.URL + 'info/',\n headers={'Authorization': 'Token ' + self.api_key}\n )\n\n return self.__handle_response(resp)", "def get_information(self):\n try:\n return self._get_information()\n except(AttributeError, KeyError) as e:\n self._logger.error(f\"Error scrapping the tab information: {e}\")", "def getInfo(self):\n self.name, self.description = achievements[self.id]", "def get_main_information(self) -> Dict:\n if self.information is None:\n self.information = self.orthanc.get_instance_information(\n self.identifier\n )\n\n return self.information", "def info(self):\n return dict(\n name=self.name,\n offset=self.offset,\n length=self.length,\n width=self.width,\n height=self.height,\n )", "def get(self):\n return self._metadata", "def get_mn_info(self):\n\t\treturn self._infoCommonMuscleConnections, self._infoSpecialConnections", "def get_info(self):\n if self.status != \"not connected\":\n m = self.serial\n m.write(\"info?\" + \"\\r\\n\")\n info = m.read(100)\n info = info[7:]\n result = string.strip(info)\n return result\n else:\n pass", "def get_exchange_info(self):\n return self.request.get(path=\"/info\")", "def get_info(self, pos):\n if pos in self._mine:\n return mine_data(self._mine[pos])\n return {}", "def info(self):\n _, data = yield from self.transport.perform_request('GET', '/')\n return data", "def infolist(self):\r\n return list(self.infoiter())", "def get_info(self):\n url = self._url_for_op('info')\n data= None # This will be a GET request since data is None\n response = self._get_raw_response(self._get_json_headers,\n self._get_json_response, url, data)\n response = json.loads(response)\n self.api_info = response['results']\n return self.api_info", "def getInfo(notification):", "def info() -> Dict[str, Any]:", "def get_music_info(self):\n return self.get(COMMAND_UIC, 'GetMusicInfo')", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def info(self):\n version_str 
= self.version\n return Utils.version_str2tuple(version_str)", "def info(): # noqa: E501\n return 'do some magic!'", "def getInfo(self):\n return self.name + \" [\" + self.target_type + \"]\"", "def usage_information(self):\n return self._usage_information", "def userinfo(self):\n return self._userinfo", "def info(self):\n for key, value in self.dataset['info'].items():\n print('{}: {}'.format(key, value))", "def info(self, key = None):\n return self.client.get(self.name).getBodyData(key)", "def mod_info(self):\n return (self.name, self.version, self.author, getattr(self.__class__, \"nsfw\", False))", "def info(self):\n return nx.info(self.tree)\n\n\n # def children(self):\n \"\"\" Return the children of the current node.\n\n \"\"\"\n # return self.left, self.right" ]
[ "0.8621765", "0.8073575", "0.8061986", "0.786965", "0.77836376", "0.76870424", "0.7581794", "0.7577282", "0.74306035", "0.74306035", "0.7390646", "0.7379589", "0.7364209", "0.73237324", "0.73167586", "0.73095566", "0.73095566", "0.72781247", "0.72658557", "0.7199898", "0.71804565", "0.7155547", "0.7146474", "0.7131625", "0.7052792", "0.69905734", "0.6959838", "0.6942494", "0.6935368", "0.6902504", "0.6882236", "0.6879988", "0.6876202", "0.68627656", "0.68158287", "0.68152416", "0.6752167", "0.6707329", "0.6705754", "0.6705754", "0.66990405", "0.6696837", "0.6695519", "0.66940767", "0.6685984", "0.6685436", "0.6685436", "0.6680416", "0.6676266", "0.6670463", "0.66594636", "0.66464055", "0.66341925", "0.66142684", "0.66124356", "0.6585673", "0.65654945", "0.65419394", "0.6533683", "0.64958733", "0.6495721", "0.6470919", "0.64528894", "0.6421371", "0.641158", "0.63898253", "0.6374", "0.6370757", "0.634047", "0.6323078", "0.631952", "0.6308437", "0.63054913", "0.63050467", "0.6290858", "0.6289425", "0.6278547", "0.62696755", "0.62517166", "0.624219", "0.62362313", "0.6232239", "0.6220175", "0.62194973", "0.62099445", "0.61998385", "0.61873657", "0.61705774", "0.6149393", "0.6148739", "0.6148739", "0.6148739", "0.6143298", "0.61278737", "0.6114983", "0.6114935", "0.6107693", "0.6095728", "0.6091143", "0.6084986", "0.6080634" ]
0.0
-1
Gets the coordinates of this atom. Returns
def get_coords(self): return self.coords
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coordinates(self):\n return self.xy", "def get_coordinates(self):\n return self.coordinates", "def coordinates(self):\n return self._coordinates", "def coordinates(self):\n return self._coordinates", "def getCoords(self):\r\n \r\n return self.coords", "def coords(self):\n return nx.get_node_attributes(self.network, 'coords')", "def xy(self):\n return self.coords.xy", "def coordinates(self):\n return np.array([self.x, self.y])", "def coordinates(self) -> Tuple[int, int]:\n return self.x, self.y", "def coords(self):\n return (self.x, self.y, self.z)", "def coords(self):\n return (self.x, self.y, self.z)", "def coords(self):\n\n return self.__get_gps_location()", "def get_coordinates(self):\n return np.array([(n.x, n.y) for n in self.nodes])", "def coordinates(self):\n return self.latitude, self.longitude", "def coordinates(self):\n if hasattr(self, '_coordinates'):\n return self._coordinates\n else:\n return self._points", "def getCoords(self):\n\n if self._coords is not None:\n return self._coords[self._acsi].copy()", "def coords(self) -> Tuple[float, float]:\n return self.lat, self.lng", "def coordinates(self) -> Optional[Coordinates]:\n if self.atoms is None:\n return None\n\n return self.atoms.coordinates", "def get_coords(self):\n return [self.x,self.y,self.w,self.h]", "def xy(self) -> Tuple[int, int]:\n return self._x, self._y", "def coordinates(self) -> Tuple[float, float, float, float, float]:\n return (self.x, self.y, self.x + self.width, self.y + self.height)", "def get_coord(self):\n return self.coord", "def getMachineCoordinates(self):\n return (self.x, self.y, self.z)", "def get_coords(self) -> Tuple[int]:\r\n return self.file, self.rank", "def coordinates(self):\n return np.array([[f.x, f.y] for f in self])", "def position(self):\n return self._x, self._y", "def _getCoords(self):\n\n if self._coords is not None:\n return self._coords[self._acsi]", "def position(self):\n return self.x, self.y", "def xy(self) -> Tuple[float, float]:\n return (self.x, self.y)", "def get_edge_coords(self):\n return self.coords", "def get_pos(self):\n return (self.x, self.y)", "def coordinates(self):\n location = GoogleMaps(self.location)\n coordinates = location.coordinates\n return coordinates", "def get_position_coords(cls):\n row = math.floor(cls.position / cls.size)\n col = cls.position - row * cls.size\n return row, col", "def get(self):\n return self.x, self.y", "def getXY(self):\n return (self.X,self.Y)", "def coordinate(self) -> Tuple[float, float]:\n return self.lat, self.lon", "def get_position(self):\n return (self.x_pos, self.y_pos)", "def coords(self):\n return np.column_stack((self.x_coord_list, self.y_coord_list, self.z_coord_list))", "def get_location(self):\r\n return self.__x, self.__y", "def x(self):\n return self.coords[0]", "def getCoordinates(self):\n return list(self.gridVars.keys())", "def extractCoords(self):\n if not self.rank:\n logging.info('Extracting atomic poitions')\n\n # Extract coordinates from liggghts\n self.lmp.command('variable x atom x')\n x = Rxn.lmp.extract_variable(\"x\", \"group1\", 1)\n\n self.lmp.command('variable y atom y')\n y = Rxn.lmp.extract_variable(\"y\", \"group1\", 1)\n\n self.lmp.command('variable z atom z')\n z = Rxn.lmp.extract_variable(\"z\", \"group1\", 1)\n\n coords = np.zeros((self.lmp.get_natoms(),3))\n\n for i in range(self.lmp.get_natoms()):\n coords[i,:] = x[i], y[i], z[i]\n\n self.lmp.command('variable x delete')\n self.lmp.command('variable y delete')\n self.lmp.command('variable z delete')\n\n return coords", "def 
getpos(self):\n return self.pos.cartesianas()", "def get_point(self):\n return self._x, self._y", "def get_position(self):\n ret = _pal.Vec3()\n _pal.lib.geometry_get_position(self._geometry, ret)\n return [x for x in ret]", "def x(self):\n return self._coords[0]", "def get_roi_coords(self):\n return [roi.get_coords() for roi in self.rois]", "def xy_coordinates(self):\n\n return np.meshgrid(self.x_coord, self.y_coord)", "def get_coords(self, srid=4326):\n return self.geom.transform(srid, clone=True).coords", "def coord(self):\r\n return self.model.coord", "def Getxcoord(self):\n return self.x_coord", "def get_box_coordinates(self):\n return self.box_coordinates", "def xy(self):\n return self.to_xyah()[0:2]", "def getCoords(self): # real signature unknown; restored from __doc__\r\n pass", "def coord(self, i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n cdef const double *coord = freesasa_structure_coord_array(self._c_structure)\n return [coord[3*i], coord[3*i+1], coord[3*i+2]]", "def basis_coords(self) -> CoordT:\n return self._basis_coords", "def get(self):\n return (self.x,self.y);", "def x_coords(self):\n x_coords = np.linspace(0, self.fft_length / self.samplate, self.fft_length + 1)\n return x_coords", "def get_spawn_xyz(self):\n return self.X, self.Y, self.Z", "def get_x(self):\n return self.coords[0]", "def ncoordinates(self):\n return _coordsys.coordsys_ncoordinates(self)", "def find_coordinates(self):\n\n raise NotImplementedError", "def find_coordinates(self):\n\n raise NotImplementedError", "def coords2D(self):\n return (self.x, self.y)", "def getCoord(self):\n return (self.birth, self.death)", "def point(self):\n return self.x, self.y, self.z", "def getDataCoordinates(self):\n coord = np.zeros((self.dataset.shape[0], 2))\n for i in range(len(self.dataset)):\n coord[i, 0] = self.dataset[i][0]\n coord[i, 1] = self.dataset[i][1]\n return coord", "def getPos(self):\n return self.Xpos,self.Ypos", "def get_coordinates_rot(self):\n return self.get_coordinates()", "def get_all_coordinates(self):\n coordinates = []\n\n for relative_coordinate in self.shape:\n co = [self.coordinate[0] + relative_coordinate[0], self.coordinate[1] + relative_coordinate[1]]\n coordinates.append(co)\n return coordinates", "def x_coords(self):\n\n # Format is a list with each node and its parent, separated by NaN values\n return self._list_link_to_parents(col_name=\"x\")", "def get_coords(self):\n xTK = int(jeu.coords(self.rectangle)[0]) # Coordonnées TKinter x1 et y1 du rectangle correspondant à la voiture\n yTK = int(jeu.coords(self.rectangle)[1])\n # On divise par la largeur d'une case et on renvoie les valeurs obtenues sous la forme d'un tuple\n X = xTK//100\n Y = yTK//100\n resultat = [X, Y]\n return resultat", "def get_positions(self):\n return self.positions", "def get_xy(self):\r\n return self.board.get_xy()", "def get_position(self):\n return [self._row, self._column]", "def getCoord(self, i):\n _x = self.__xpts[i]\n _y = self.__ypts[i]\n return _x, _y", "def get_points(self):\r\n return self.nx*self.ny*self.nz", "def get_pos(self):\n return (self.x/3, 3**0.5*self.y/3, self.r/3)", "def get_coordinates_list(self):\n return [tweet['coordinates'][::-1] for tweet in self.tweets_data]", "def coordinates(self):\n # TODO: Add the feature where coordinates come from multiple sources.\n # Consider whether or not you'd want to output the categorical\n # variable indicating the source of the coordinate data or\n # make the user place coordinates a different property 
entirely.\n try:\n bounding_box = array(\n self.status.place\n [\"bounding_box\"]\n [\"coordinates\"]\n ).squeeze()\n centroid = bounding_box.mean(axis=0)\n return centroid\n except AttributeError:\n return zeros(2)", "def get_coordinates(self):\r\n coordinates_list = []\r\n for i in range(self.__length):\r\n if self.__orientation == Direction.VERTICAL:\r\n temp = (self.__location[0] + i, self.__location[1])\r\n if self.__orientation == Direction.HORIZONTAL:\r\n temp = (self.__location[0], self.__location[1] + i)\r\n coordinates_list.append(temp)\r\n return coordinates_list", "def getTelescopeCoords(self):\n return self.header['ANT_X'],self.header['ANT_Y'],self.header['ANT_Z']", "def _get_display_coordinates(self) :\n \n return self._display_coordinates", "def getXCoordinate(self) -> float:\n return self.x_coord", "def ll_coordinates(self):\n\n x, y = self.xy_coordinates\n proj_out = check_crs('EPSG:4326')\n\n return transform_proj(self.proj, proj_out, x, y)", "def getPosicion(self):\r\n\t\treturn [self._x, self._y]", "def get_position(self):\n return parsegeometry(self.geometry())[2:]", "def coords(self):\n return coord.SkyCoord(ra=self.ra, dec=self.dec,\n distance=self.get_distance(lutz_kelker=lutz_kelker))", "def get_values(self):\n return (self.x,self.y)", "def getCoords(self):\n if self._ra == \"\" or self._dec == \"\":\n raise ValueError('Object named ' + self._name +' has no coordinates in database.')\n ra = self._ra.split(\":\")\n dec = self._dec.split(\":\")\n raTuple = (int(ra[0]), int(ra[1]), float(ra[2]))\n decTuple = (dec[0][0], int(dec[0][1:]), int(dec[1]), float(dec[2]))\n return raTuple, decTuple", "def xyz(self):\n return (self.x(), self.y(), self.z())", "def position(self):\n return self.atoms.reshape((1,-1))", "def get(self):\r\n return ((self.x, self.y), self.dir)", "def coordinates(self):", "def dirt_coords(self):\n try:\n return self.__dirt_coords\n except AttributeError:\n self.__dirt_coords = []\n for row_ind, row in enumerate(self.matrix):\n for col_ind, cell in enumerate(row):\n if cell == \"d\":\n self.__dirt_coords.append((row_ind, col_ind,))\n return self.__dirt_coords", "def getBallPos(self) -> (int,int):\n return self.x, self.y", "def position(self):\n return (self.__position)", "def position(self):\n return self._pos.to_list()", "def get_pos(self) -> tuple:\n return self.pos", "def texcoords(self):\n return self._texcoords" ]
[ "0.8518098", "0.8345379", "0.82986754", "0.82986754", "0.82948023", "0.8146323", "0.8145044", "0.8112127", "0.8061573", "0.8036323", "0.8036323", "0.7918564", "0.7912341", "0.7905923", "0.789621", "0.7851619", "0.77691483", "0.77299666", "0.76837", "0.76726896", "0.766294", "0.76354736", "0.7613365", "0.76053834", "0.7582628", "0.75804543", "0.75575846", "0.75477004", "0.7523281", "0.7471958", "0.7468716", "0.7458748", "0.74567527", "0.74487495", "0.744748", "0.7429384", "0.7427038", "0.7398302", "0.736895", "0.73555493", "0.733485", "0.73315173", "0.7308031", "0.73016506", "0.7271281", "0.7270017", "0.7245079", "0.7236688", "0.7232466", "0.721115", "0.7211109", "0.72110844", "0.72028834", "0.7187335", "0.71754116", "0.71613955", "0.7154589", "0.7150545", "0.7142901", "0.7137496", "0.71164894", "0.7111713", "0.7111713", "0.70976335", "0.7096604", "0.7070864", "0.70471644", "0.7042989", "0.70405865", "0.7012395", "0.7012154", "0.7002637", "0.6995053", "0.69861543", "0.6982764", "0.6976052", "0.697111", "0.6967144", "0.69635195", "0.69564587", "0.69515604", "0.694847", "0.69481224", "0.69475853", "0.6928558", "0.6927946", "0.6926777", "0.69209814", "0.6920333", "0.69194365", "0.69142663", "0.6893702", "0.68907005", "0.6883764", "0.68698597", "0.686371", "0.6849335", "0.6830102", "0.6828888", "0.6824195" ]
0.8384263
1
Gets the x coordinate of this atom. Returns
def get_x(self): return self.coords[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Getxcoord(self):\n return self.x_coord", "def _get_x(self):\n return self.position.x", "def getXCoordinate(self) -> float:\n return self.x_coord", "def get_x_position(self):\n return self.actual_coordinates[0]", "def x(self):\n return self._coords[0]", "def get_pos_x(self):\n return self.__pos_x", "def x(self):\n if self._x is None:\n self.compute_coordinates()\n return self._x", "def x(self):\n return self.coords[0]", "def get_x(self):\n return self.posX", "def get_x_position(self):\n return self.rect.x", "def get_x(self) -> int:\n return self.__x", "def x(self):\n return _libsbml.Point_x(self)", "def GetX(self):\r\n\r\n return self._x", "def getX(self):\n return self.__x", "def x_coord(self):\n\n return self.x0 + np.arange(self.nx) * self.dx", "def getX(self):\n return self.x", "def getX(self):\n return self.position.getX()", "def getX(self):\r\n\t\treturn self._x", "def getX(self):\n return self.position[0]", "def getX(self):\n return _libsbml.BoundingBox_getX(self)", "def get_origin_x_position(self):\n return self.origin_coordinates[0]", "def origin_x(self):\n return self._origin[0]", "def x(self):\r\n return self.position.x", "def get_ship_x(self):\n return self.x", "def getXOffset(self):\n return _libsbml.Point_getXOffset(self)", "def x(self) -> int:\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def x(self):\n return self._x", "def __get_x__(self):\n return self.Direction['x']", "def cells_x(self):\n return self._cells[0]", "def x_origin(self):\n return self._x_origin", "def x ( self ) :\n return self.xvar", "def getX(self):\n return self.components[0]", "def getX(self):\n return self.components[0]", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def get_axis_x(self):\r\n return self.__x_axis", "def x(self):\n return self[\"x\"]", "def x(self):\n return self._kml['x']", "def get_x(self):\n\t\treturn self._collision_rect.x + 14", "def x(self):\n return self.x", "def getX(self):\n return self.proj.getX()", "def get_position(self):\n return self._find_gnx_node(self.gnx)", "def x(self):\n return (self.__x)", "def x(self) -> int:\n return self.data.x_centre >> 4", "def x(self):\n if self.es_elemento_neutro():\n raise AttributeError(\"El elemento neutro no tiene componente x\")\n else:\n return self._x", "def getMinX(self):\n return self.minx", "def locations_x(self):\n return self._locations[0]", "def x(self):\n return self._data[0]", "def get_xmin(self):\n return self.__xmin", "def reflect_x(self):\n\n return Point(self.x, - self.y)", "def x(self):\n return self._turtle.xcor()", "def x(self):\n return self._turtle.xcor()", "def xaxis ( self ) :\n return self.__xaxis", "def get_alien_x(self):\n return self.x", "def xaxis(self):\n return self._xaxis", "def xaxis ( self ) :\n return self.__xaxis", "def 
xaxis ( self ) :\n return self.__xaxis", "def x(self, x=None):\n\n if x is None:\n return self._x\n else:\n if not isinstance(x, int) and not isinstance(x, float):\n raise TypeError(\"x must be numeric, not '%s'\" % x)\n self._x = x", "def x(self, x=None):\n\n if x is None:\n return self._x\n else:\n if not isinstance(x, int) and not isinstance(x, float):\n raise TypeError(\"x must be numeric, not '%s'\" % x)\n self._x = x", "def x(self):\n return self._reg2val(MMA7660_X_ADDR)", "def findX(self):\n return self.x", "def getPosition(self):\n return self.x", "def anchor_x(self):\n return self._anchor_x", "def get_x(self, i):\n scale = (self.__xmax - self.__xmin) / (self.__width - 1)\n return scale * i + self.__xmin", "def GetTextX(self):\r\n\r\n return self._text_x", "def x(self):\n return self[0]", "def X(self):\n return self.x\n pass", "def get_lx(self):\r\n return int(self.dx * self.nx - self.ox)", "def getXPoint(self, x):\n # Find the correct parameter\n t = (x - self.p0.x) / self.d.x\n return self.point(t)", "def xAt(self, col):\n\n return self.bottomBoard.x + self.bottomBoard.xAt(col)", "def get(self):\n return self.x-self.offset", "def centerx(self):\n return self.left + self.width / 2", "def reflect_x(self):\n r_x = self.x\n r_y = self.y *-1\n\n return (Point(r_x,r_y))", "def x(self) -> float:\n return self.data[0]", "def x(self):\n return self.axes[1]", "def x_min(self):\n return self.get_min_value(self.X_INDEX)" ]
[ "0.8656004", "0.8442269", "0.84205234", "0.83736426", "0.8353082", "0.83035123", "0.82412916", "0.8212211", "0.82038295", "0.81553376", "0.8097587", "0.80942667", "0.7829044", "0.78141165", "0.78131527", "0.7794782", "0.7767253", "0.77473986", "0.773306", "0.76320076", "0.762475", "0.7616443", "0.7519012", "0.74905527", "0.7462451", "0.7451654", "0.7419411", "0.7419411", "0.7419411", "0.7419411", "0.7419411", "0.7419411", "0.7419411", "0.7419411", "0.7419411", "0.7419411", "0.7419411", "0.7419411", "0.7419411", "0.7419411", "0.7419411", "0.7419411", "0.7413169", "0.7370686", "0.73479426", "0.73371226", "0.7305437", "0.7305437", "0.7301706", "0.7301706", "0.7301706", "0.7301706", "0.7301706", "0.7301706", "0.7301706", "0.7301706", "0.7301706", "0.7301706", "0.7301706", "0.7282103", "0.7264494", "0.72514343", "0.72304475", "0.7213573", "0.71135455", "0.705993", "0.7055956", "0.70554286", "0.6982284", "0.6952887", "0.6949619", "0.6932509", "0.69030654", "0.6894814", "0.6868114", "0.6868114", "0.6858369", "0.68573564", "0.6823011", "0.6819969", "0.6819969", "0.68101466", "0.68101466", "0.68088704", "0.6776895", "0.6760299", "0.6759032", "0.67452514", "0.67445385", "0.6736674", "0.6698754", "0.6690204", "0.66819984", "0.66703486", "0.6666876", "0.6635775", "0.66349113", "0.663091", "0.6624954", "0.66236544" ]
0.8588003
1
Gets the y coordinate of this atom. Returns
def get_y(self): return self.coords[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def y(self):\n return self._coords[1]", "def y(self):\n return _libsbml.Point_y(self)", "def getYCoordinate(self) -> float:\n return self.y_coord", "def getY(self):\n return self.__y", "def GetY(self):\r\n\r\n return self._y", "def getY(self):\r\n\t\treturn self._y", "def y(self):\n return self.coords[1]", "def getY(self):\n return self.y", "def getY(self):\n return self.y", "def getY(self):\n return _libsbml.BoundingBox_getY(self)", "def get_y_position(self):\n return self.actual_coordinates[1]", "def y(self):\n if self._y is None:\n self.compute_coordinates()\n return self._y", "def get_y(self):\n return self.__y", "def getY(self):\n return self.position[1]", "def y_coord(self):\n\n return self.y0 + np.arange(self.ny) * self.dy", "def _get_y(self):\n return self.position.y", "def getY(self):\n return self.position.getY()", "def get_pos_y(self):\n return self.__pos_y", "def getYpos(self):\n return self.y", "def findY(self):\n return self.y", "def y(self):\n return self._y", "def y(self):\n return self._y", "def y(self):\n return self._y", "def y(self):\n return self._y", "def y(self):\n return self._y", "def y(self):\n return self._y", "def y(self):\n return self._y", "def y(self):\n return self._y", "def y(self):\n return self._y", "def y(self):\n return self._y", "def getY(self):\n return self.components[1]", "def getY(self):\n return self.components[1]", "def getY(self):\n y = self.getAttribute('y')\n kind = self.getKind()\n self._y = y if kind == 'pie' else None\n return self._y", "def __get_y__(self):\n return self.Direction['y']", "def y(self):\n return self.__y", "def y(self):\n return self.__y", "def y(self):\n return self.__y", "def y(self):\n return self.__y", "def y(self):\n return self.__y", "def y(self):\n return self.__y", "def y(self):\n return self.__y", "def y(self):\n return self.__y", "def y(self):\n return self.__y", "def y(self):\n return self.__y", "def y(self):\n return self.y", "def y(self):\n return (self.__y)", "def get_y(self):\n return self.posY", "def y ( self ) :\n return self.yvar", "def y(self):\n return self[\"y\"]", "def getY(self):\n return self.proj.getY()", "def y(self) -> int:\n return self.data.y_centre >> 4", "def get_y(self, x):\n p, y = self.get_p_y(x)\n return y", "def y(self):\n return self._kml['y']", "def getYOffset(self):\n return _libsbml.Point_getYOffset(self)", "def origin_y(self):\n return self._origin[1]", "def y(self,) -> int:\n return self._y", "def cells_y(self):\n return self._cells[1]", "def get_y_position(self): \n return self.rect.y", "def y(self):\n return self._translation[1, 0]", "def y(self):\r\n return self.position.y", "def get_axis_y(self):\r\n return self.__y_axis", "def y(self):\n return self._data[1]", "def locations_y(self):\n return self._locations[1]", "def Y(self):\n return self._Y", "def getY(self):\n return self.labels[0]", "def Y(self):\n return self.y\n pass", "def y(self):\n return self._reg2val(MMA7660_Y_ADDR)", "def get_origin_y_position(self):\n return self.origin_coordinates[1]", "def y_origin(self):\n return self._y_origin", "def y(self):\n return self._turtle.ycor()", "def y(self):\n return self._turtle.ycor()", "def get_alien_y(self):\n return self.y", "def y(self) -> float:\n return self.data[1]", "def y(self):\n return self.axes[0]", "def y(self):\n return self.top", "def get_y(self):\n\t\treturn self._collision_rect.y + 25", "def y(self):\n return self[1]", "def y(self):\n return self._arr[1]", "def y(self):\n return self.dataset.y", "def yaxis ( self ) :\n return self.__yaxis", "def yaxis ( self ) 
:\n return self.__yaxis", "def yax(self):\n return self.__yax", "def y0(self):\n return self._y0", "def ydata(self):\n return self._ydata", "def yaxis(self):\n return self._yaxis", "def y_axis_location(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"y_axis_location\")", "def y_axis_location(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"y_axis_location\")", "def y_axis_location(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"y_axis_location\")", "def y(self):\n return self._data.align(self._y, axis=0,\n join='inner')[1]", "def getMaxY(self):\n return self.maxy", "def y(self):\n if self.es_elemento_neutro():\n raise AttributeError(\"El elemento neutro no tiene componente y\")\n else:\n return self._y", "def yvar ( self ) :\n return self.__yvar", "def landmark_y(self):\n y_cols = [col for col in self.landmark_columns if \"y\" in col]\n return self[y_cols]", "def landmark_y(self):\n y_cols = [col for col in self.landmark_columns if \"y\" in col]\n return self[y_cols]", "def getMinY(self):\n return self.miny", "def y(self, y=None):\n\n if y is None:\n return self._y\n else:\n if not isinstance(y, int) and not isinstance(y, float):\n raise TypeError(\"y must be numeric, not '%s'\" % y)\n self._y = y", "def y(self, y=None):\n\n if y is None:\n return self._y\n else:\n if not isinstance(y, int) and not isinstance(y, float):\n raise TypeError(\"y must be numeric, not '%s'\" % y)\n self._y = y", "def y2(self):\n return self._y2", "def y_distance(self):\n return self.get_distance(self.Y_INDEX)", "def getYLabel(self): \n return self.__y_label__" ]
[ "0.85608596", "0.8548253", "0.8522086", "0.84701955", "0.84196323", "0.8416182", "0.8400373", "0.8396566", "0.8396566", "0.8329309", "0.8296547", "0.82906246", "0.8284484", "0.82126814", "0.8207803", "0.8192792", "0.81803006", "0.79841197", "0.795317", "0.7928298", "0.7902805", "0.7902805", "0.7902805", "0.7902805", "0.7902805", "0.7902805", "0.7902805", "0.7902805", "0.7902805", "0.7902805", "0.7886862", "0.7886862", "0.78454256", "0.78413796", "0.7808897", "0.7808897", "0.7808897", "0.7808897", "0.7808897", "0.7808897", "0.7808897", "0.7808897", "0.7808897", "0.7808897", "0.7754799", "0.77428746", "0.77343273", "0.77220136", "0.7712548", "0.76701754", "0.76638204", "0.7658211", "0.7636076", "0.7634211", "0.7632584", "0.7620543", "0.75617975", "0.7553165", "0.7548599", "0.7530055", "0.75172484", "0.7497401", "0.7483456", "0.7479263", "0.74732643", "0.74602544", "0.7442789", "0.7409954", "0.73874944", "0.735016", "0.735016", "0.73224443", "0.72860056", "0.72584677", "0.7257423", "0.7228221", "0.71761113", "0.71541035", "0.7117066", "0.7111895", "0.7111895", "0.70871097", "0.7076207", "0.70692444", "0.7060348", "0.7037402", "0.7037402", "0.7037402", "0.7036961", "0.6979399", "0.69255024", "0.6922021", "0.6920833", "0.6920833", "0.6920507", "0.69096404", "0.69096404", "0.68933356", "0.68821084", "0.68773663" ]
0.8524346
2
Gets the z coordinate of this atom. Returns
def get_z(self): return self.coords[2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getZ(self):\n return self.position.getZ()", "def getZ(self):\n\t\treturn self.coords.z", "def z(self):\n return self._coords[2]", "def z(self):\n return self.coords[2]", "def get_z(self) -> int:\n return self.__z", "def z(self):\r\n return self.position.z", "def getZ(self):\n return _libsbml.BoundingBox_getZ(self)", "def z(self):\n return self._z", "def z(self):\n return self._z", "def z(self):\n return self._z", "def __get_z__(self):\n return self.Direction['z']", "def z(self):\n return _libsbml.Point_z(self)", "def z ( self ) :\n return self.zvar", "def getZCoord(self, x, y):\n n = self.normal()\n z = (-n.x * (x - self.p0.x) - n.y * (y - self.p0.y) + n.z * self.p0.z) / n.z\n return z", "def z(self):\n return self._data[2]", "def getZOffset(self):\n return _libsbml.Point_getZOffset(self)", "def zaxis(self):\n return self._zaxis", "def z(self):\n return self._reg2val(MMA7660_Z_ADDR)", "def get_z(self, dt):\n return dt[\"z\"]", "def getz_index(self):\n return self._getz_index", "def M_z(self) -> int:\n return self.params.M_z", "def origin_z(self):\n return self.locations_z[0]", "def z(self):\n return self._translation[2, 0]", "def zaxis ( self ) :\n return self.__zaxis", "def z(self):\n return self._arr[2]", "def zvar ( self ) :\n return self.__zvar", "def z0(self):\n return self.params['z0']", "def z(self):\n return self[2]", "def z_distance(self):\n return self.get_distance(self.Z_INDEX)", "def _get_z_coord(cube):\n for coord in cube.coords(dim_coords=True):\n if iris.util.guess_coord_axis(coord) == 'Z':\n z_coord = coord\n break\n else:\n raise ValueError(f\"Cannot determine height axis (Z) of cube \"\n f\"{cube.summary(shorten=True)}\")\n return (z_coord, cube.coord_dims(z_coord)[0])", "def _zforce_xyz(self,x,y,z):\n return -2.*np.pi*self._rhoc_M * self.a**3*self._b*self._c * \\\n _forceInt(x, y, z, self._a2, self._b2*self._a2, self._c2*self._a2, self.n, 2)", "def getZMax(self):\n return self.zmax", "def getZRange(self):\n return self.z_array", "def xyz(self):\n return self._xyz", "def world_zaxis(self):\n return observable.MJCFFeature('xmat', self._entity.root_body)[6:]", "def meshz(self):\n if self._meshz is None:\n if self.ndim == 3:\n self._meshz = self.get_mesh_coord(3)\n else:\n self._meshz = None\n\n return self._meshy", "def getz(self,pix):\n\t\tx = pix[0]\n\t\ty = pix[1]\n\t\treturn self.cube_data[x][y]", "def getZMin(self):\n return self.zmin", "def GetZoneOffset(self):\n if self.zDirection is None:\n return None\n else:\n return self.zDirection * self.zOffset", "def z(self) -> float:\n return self.A[3] if self.scalar_vector else self.A[2]", "def z(self):\n return self[:, 2]", "def locations_z(self):\n if self.is_depth:\n return [-z for z in reversed(self._locations[2])]\n return self._locations[2]", "def z_halo(self): \n return self.coords_halo[2]", "def z_index(self):\n return self._z_index", "def getVelZ(self):\n return self.posvel.getZ()", "def getZOffsetExplicitlySet(self):\n return _libsbml.Point_getZOffsetExplicitlySet(self)", "def getSurfAlongZ(self):\n\n return self._surf", "def cells_z(self):\n if self.is_depth:\n return list(reversed(self._cells[2]))\n return self._cells[2]", "def get_coord_val(self, x, y, z):\n if self.is_4d():\n #return self._data[y, x, z, self._time_point]\n return self._data[self._y_shift - y, x, z, self._time_point]\n else:\n #return self._data[y, x, z]\n return self._data[self._y_shift - y, x, z]", "def z(self):\r\n return self.unif[2]", "def get_value(self, x, y, z):\n\t\treturn self.data[ self.xyz_to_offset(x,y,z) ]", "def N_z(self) 
-> int:\n return self.params.N_z", "def xyz(self):\n return (self.x(), self.y(), self.z())", "def z_min(self):\n return self.get_min_value(self.Z_INDEX)", "def zmax(self):\n return self._zi", "def idx_z(self, zval):\r\n iz = np.around((zval - self.oz) / self.dz)\r\n return int(iz)", "def x_value(self, z):\n return z * self.p + self.n", "def Get_CalOutZ_Value(self):\r\n z = self.Get_RawOutZ_Value()\r\n if(z >= self.minZ and z <= self.maxZ):\r\n return 0\r\n else:\r\n return z - self.meanZ", "def get_z_for(self, dt, field):\n return self.get_z(dt)", "def z(self) -> NumType:\n return abs(self._ohms)", "def get_zarr(self, position):\n pos_info = self.position_map[position]\n well = pos_info['well']\n pos = pos_info['name']\n return self.store[well][pos][self.arr_name]", "def point(self):\n return self.x, self.y, self.z", "def z_max(self):\n return self.get_max_value(self.Z_INDEX)", "def xyz(self) -> np.ndarray:\n return np.vstack((self.x, self.y, self.z)).transpose()", "def ThRZcoords(self):\n Th, R = self.ThRcoords()\n\n if self.axial >= 0 and self.axial < len(self.ThRZmesh.getPositions(label=\"Z\")):\n Z = (\n self.ThRZmesh.getUpper(label=\"Z\", n=(self.axial - 1))\n + self.ThRZmesh.getUpper(label=\"Z\", n=(self.axial))\n ) * 0.5\n else:\n runLog.warning(\n \"Error: Axial Index ({0}) location not INSIDE mesh \".format(self.axial)\n )\n runLog.warning(self.ThRZmesh.getPositions(label=\"Z\"))\n Z = None\n\n return Th, R, Z", "def xyz(self: Q) -> np.array:\n\n return np.array([self.x, self.y, self.z])", "def getAngVelZ(self):\n return self.angvel.getZ()", "def get_z_delta(self, z):\n if self.z is None:\n raise UnknownCarriagePosition\n\n z_delta = z - self.z\n error = z_delta % copysign(self.stepper.MM_PER_STEP, z_delta)\n return z_delta, error", "def GetZ(self, dme, dmhost):\n z = fsolve(self.DMeq, self.z0, args=(dme, dmhost))\n z = float(z)\n return z", "def get_dndz(self, z):\n return self._dndz(z)", "def unit_z(cls):\n return cls(0, 0, 1)", "def z(self) -> np.ndarray:\n return self.array[:, 3] if self.scalar_vector else self.array[:, 2]", "def get_xyz(self, xyz):\n if cm.mag(xyz) < self.get_actual_inner_boundary():\n val = np.array([np.NaN, np.NaN, np.NaN])\n else:\n points = self.__get_points_object__([xyz])\n val = self.__get_data_at_points__(points)[0]\n\n # print (val)\n return val", "def vec_z(self):\t\t\t\r\n if self.oz != 0:\r\n ov = self.oz\r\n lv = self.self.lz + self.oz\r\n else:\r\n ov = self.dz / 2\r\n lv = self.lz\r\n\r\n zv = \"\"\r\n for num in np.arange(ov, lv, self.dz):\r\n zv += str(num) + \" \"\r\n\r\n return zv", "def magnitude(self):\n return math.sqrt(self.x**2 + self.y**2 + self.z**2)", "def xyz(self, i):\n return self.xp[i], self.yp[i], self.zp[i]", "def omLz(self,z):\n return self.omL/(self.omL + self.omR*(1.0 + z)**2 + self.om0*(1.0 + z)**3)", "def three_dimensional(self, z): # Maybe I misunderstood the task. 
My method looks weird\n return (self.x, self.y, z)", "def get_lz(self):\r\n return self.dz * self.nz - self.oz", "def z_score(self, x):\n\n mean = self.mean\n stddev = self.stddev\n\n z = (x - mean) / stddev\n\n return z", "def Get_RawOutZ_Value(self):\r\n l = self.__readFromRegister(self.__REG_R_OUT_Z_L, 0xff)\r\n h_u2 = self.__readFromRegister(self.__REG_R_OUT_Z_H, 0xff)\r\n h = bitOps.TwosComplementToByte(h_u2)\r\n if (h < 0):\r\n return (h*256 - l) * self.gain\r\n elif (h >= 0):\r\n return (h*256 + l) * self.gain", "def nCz(self):\n if self.dim < 3:\n return None\n return int(self._n[2])", "def mz(self):\n return self._mz.copy()", "def xyz(self):\n xyz = np.zeros((len(self), 3))\n\n xyz[:len(self.qc_mol), ...] = self.qc_mol.xyz\n xyz[len(self.qc_mol):len(self.qc_mol) + len(self.br_mol), ...] = self.br_mol.xyz\n xyz[-len(self.pc_mol):, ...] = self.pc_mol.xyz\n\n return xyz", "def setZ(self, *args):\n return _libsbml.Point_setZ(self, *args)", "def height_at(self, x, z):\n\n return self.heightmap[x * 16 + z]", "def setZ(self, z):\n self.position.setZ(z)", "def get_zscore_data(self):\n self.update_filter_inds()\n return _z_score(self)", "def getZ(self, hand=None):\n\n raise NotImplementedError", "def get_spawn_xyz(self):\n return self.X, self.Y, self.Z", "def get_zlabel(self):\n return self._frame.GetZaxis().GetTitle()", "def n_z(self, level):\n resolution = self.resolution(level)\n return (self.z_extent // resolution + 63) // 64", "def xyz(self) -> np.ndarray:\n return self._vector[0:3]", "def get_zdim(self):\n return self.decoder.get_input_info_dict()['latent_vector'].get_shape()[1]", "def position3d(self) -> Point3:\n return Point3.from_proto(self.proto.pos)", "def get_z_mean(self):\n pi = torch.sigmoid(self.qz_log_alpha)\n return torch.clamp(pi * (self.zeta - self.gamma) + self.gamma, min=0.0, max=1.0)", "def alphahighz(self, z):\n return self.alphaMe(3.8,self.r_vect[0],self.alpha0_vect[0]) - 0.018*(z-3.8)", "def z_eq(self):\n theta = self.T_cmb/2.7\n return 25000.*self.Omega_m*self.h**2.*theta**-4.", "def _elevation(self, node):\n return self.graph_provider.get_coords(node)['z']", "def z(self) -> int:" ]
[ "0.8616124", "0.86023843", "0.8559942", "0.84333587", "0.83052397", "0.82192296", "0.80853426", "0.80163383", "0.80163383", "0.80163383", "0.79635745", "0.79482794", "0.78137195", "0.77216375", "0.7505032", "0.74130934", "0.73766494", "0.7360839", "0.730008", "0.729796", "0.72931254", "0.7246056", "0.7219753", "0.7181541", "0.7157797", "0.71193194", "0.710592", "0.7101097", "0.70534045", "0.6912274", "0.68670374", "0.68331176", "0.682688", "0.6806713", "0.6790576", "0.6760728", "0.6736889", "0.6692905", "0.66607594", "0.66423804", "0.66387546", "0.66124094", "0.65902686", "0.65851873", "0.6573252", "0.6566129", "0.64847934", "0.6459741", "0.64435697", "0.6442384", "0.64385873", "0.6427019", "0.6409586", "0.6396548", "0.6385447", "0.6371177", "0.6341145", "0.6331995", "0.63251114", "0.6317403", "0.629077", "0.6234929", "0.6219499", "0.6218598", "0.61804116", "0.6153246", "0.6148383", "0.61450905", "0.61387825", "0.6135352", "0.6134496", "0.61294496", "0.61278397", "0.61031955", "0.60992455", "0.6091215", "0.60727066", "0.6071683", "0.6066251", "0.6049407", "0.603317", "0.6020497", "0.60039806", "0.6002144", "0.59977925", "0.59904087", "0.59893066", "0.596093", "0.5945253", "0.5941103", "0.59316736", "0.5928508", "0.5898842", "0.5897678", "0.5869034", "0.58678925", "0.5841863", "0.5839867", "0.5838467", "0.5834551" ]
0.8734822
0
Gets the mass of this atom. Returns
def get_mass(self): return self.m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mass(self):\n return self._mass", "def mass(self):\n return self._mass", "def get_mass(self):\n _pal.lib.geometry_get_mass.restype = c.c_float\n return _pal.lib.geometry_get_mass(self._geometry)", "def mass(self):\n return self._getAttribute(Attribute.mass)", "def getMolecularMass(self):\n dataDict = self.__dict__\n # get formula dictionary\n dd = {}\n for ca in self.chemAtoms:\n if isinstance(ca, ChemAtom):\n ss = ca.elementSymbol\n ii = dd.get(ss)\n if ii is None:\n dd[ss] = 1\n else:\n dd[ss] = ii + 1\n \n # calculate mass\n xx = self.root.currentChemElementStore\n result = sum(ii * xx.findFirstChemElement(symbol=ss).mass for (ss, ii) in dd.iteritems())\n return result", "def mass(self):\n return self._P", "def mass(self):\n self.check_symbols()\n return self._tree_mass(self._tokentree())", "def getMass(self):\n return self.mass", "def total_mass(self):\n return self._total_mass", "def total_mass(self):\n return self._total_mass", "def mass(self):\n\t\treturn self.volume*self.density", "def mass(self):\n return _cantera.reactor_mass(self.__reactor_id)", "def Mass(self):\n mpa = self.MassPerLength()\n if mpa == 0.0:\n return 0.\n L = self.Length()\n mass = L * mpa\n\n #try:\n #mass = (self.Rho() * self.Area() + self.Nsm()) * L\n #except TypeError:\n #msg = 'TypeError on eid=%s pid=%s:\\n' % (self.eid, self.Pid())\n #msg += 'rho = %s\\narea = %s\\nnsm = %s\\nL = %s' % (self.Rho(),\n # self.Area(),\n # self.Nsm(), L)\n #raise TypeError(msg)\n\n return mass", "def particleMass(self):\n return self.params['particleMass']", "def mass(self) -> Mass:\n return self.weight", "def m1(self):\n return self.mass[0]", "def get_mass(atomic_symbol: str) -> float:\n\n if atomic_symbol in _masses.keys():\n return _masses[atomic_symbol]\n\n else:\n return 0", "def get_mass(elem):\n return mass[get_num(elem)]", "def get_mass(element):\n return pt.elements.isotope(element).mass", "def getMasses(self):\n try:\n return self._massList\n except AttributeError:\n self._massList = [float(x) for x in self._raw_data['MASS']]\n return self._massList", "def molarMass(matID):\n mat = goodID(matID)\n compound = xl.CompoundParser(mat)\n return atomWeight(matID) * compound['nAtomsAll']", "def calc_mass(self):\n\n star = self.star\n\n K, N = star.mesh_size\n mu = star.mu_coords\n r = star.r_coords\n\n def Q1(j):\n return np.sum((mu[2::2] - mu[:-2:2]) *\n (star.rho[:-2:2, j] + 4 * star.rho[1:-1:2, j] +\n star.rho[2::2, j])) / 6\n\n mass = 0\n\n for j in range(0, N - 2, 2):\n mass += (r[j + 2] - r[j]) * (r[j]**2 * Q1(j) +\n 4 * r[j + 1]**2 * Q1(j + 1) +\n r[j + 2]**2 * Q1(j + 2))\n\n return 2 / 3 * np.pi * mass", "def mass(self, star_mass=1.0):\n m_mj = 0.004920266275467775 * star_mass**(2./3) \\\n * self.P**(1./3) * self.K * np.sqrt(1-self.e**2)\n return m_mj", "def meanMolarMass(self):\n return _cantera.phase_meanmolwt(self._phase_id)", "def mass(self):\n\t\traise NotImplementedError", "def mu(self):\n return self.mass * G", "def mass(self, element):\n return self.m(element)", "def mass(self) -> Mass:\n return Mass(0.0)", "def _get_molecule_center_of_mass(self):\n center_of_mass = np.zeros([3], dtype=float)\n masses = self._prmtop[\"MASS\"]\n for atom_ind in range(len(self._crd)):\n center_of_mass += masses[atom_ind] * self._crd[atom_ind]\n total_mass = masses.sum()\n if total_mass == 0:\n raise RuntimeError(\"zero total mass\")\n return center_of_mass / total_mass", "def enthalpy_mass(self):\n return _cantera.reactor_enthalpy_mass(self.__reactor_id)", "def intEnergy_mass(self):\n return 
_cantera.reactor_intEnergy_mass(self.__reactor_id)", "def mass_eval(self):\n # Calculate lengths\n L = np.zeros(self.m)\n for i in range(self.m):\n L[i] = np.linalg.norm(self.coord[self.con[i, 0], :] - self.coord[self.con[i, 1], :])\n\n # Calculate total mass\n self.mass = 0\n for i in range(self.m):\n self.mass += L[i]*self.WEIGHT[int(self.sizes[i])]", "def get_total_mass(self) -> int:\n total_mass = 0\n for i_complex, i_abundance in self._complexes.items():\n total_mass += i_complex.get_size_of_complex() * i_abundance\n return total_mass", "def mass_energy(particle: Particle, mass_numb: Optional[Integral] = None) -> u.J:\n return particle.mass_energy", "def M(self):\n return self._properties['M']", "def list_masses(self):\n masses = self.contents['Sub_ID']\n for i in range(self.num_atom_types):\n masses = np.where(masses == i, float(self.masses[i]), masses)\n self.contents['Mass'] = masses", "def total_mass_amu(self):\n return np.sum(self.mass_amu)", "def massmatrix(self):\n # lazy evaluation, compute the mass matrix at the first request and\n # store it until the triangular mesh or the discretization method\n # is changed\n if self._massmatrix is None:\n self._massmatrix = self.triangsamples.massmatrix(mode='normal')\n\n return self._massmatrix", "def get_mol_masses(mol):\n return np.array([a.GetMass() for a in mol.GetAtoms()])", "def total_mass(self, value):\n self._total_mass = round(value, 1)", "def mass(self, polymer='rna'):\n sequence = self.seq.upper()\n\n a = sequence.count('A')\n c = sequence.count('C')\n g = sequence.count('G')\n t = u = sequence.count('T') + sequence.count('U')\n\n if polymer == 'rna':\n return (a * 329.2) + (u * 306.2) + (c * 305.2) + (g * 345.2) + 159\n elif polymer == 'dna':\n return ((a + t) * 617.4) + ((g + c) * 618.4) - 124\n elif polymer == 'ssdna':\n return (a * 313.2) + (t * 304.2) + (c * 289.2) + (g * 329.2) - 62\n else:\n raise ValueError(\"unknown polymer type: '{}'\".format(polymer))", "def calc_mass(self):\n\n star = self.star\n\n M, K, N = star.mesh_size\n ph = star.phi_coords\n mu = star.mu_coords\n r = star.r_coords\n\n def Q1(j, k):\n sum = 0\n\n for i in range(0, M - 2, 2):\n sum += (1 / 6) * (ph[i + 2] - ph[i]) * (star.rho[i, j, k] +\n 4 *\n star.rho[i + 1, j, k]\n + star.rho[i + 2, j, k])\n\n return 2 * sum\n\n def Q2(k):\n sum = 0\n\n for j in range(0, K - 2, 2):\n sum += (1 / 6) * (mu[j + 2] - mu[j]) * \\\n (Q1(j, k) + 4 * Q1(j + 1, k) + Q1(j + 2, k))\n\n return 2 * sum\n\n mass = 0\n\n for k in range(0, N - 2, 2):\n mass += (1 / 6) * (r[k + 2] - r[k]) * (r[k]**2 * Q2(k) +\n 4 * r[k + 1]**2 * Q2(k + 1) +\n r[k + 2]**2 * Q2(k + 2))\n\n return mass", "def by_mass(self):\n try:\n mass = self._data_cache['mass']\n except:\n chemicals = self.chemicals\n self._data_cache['mass'] = mass = \\\n ChemicalMassFlowIndexer.from_data(\n SparseVector.from_dict(\n MassFlowDict(self.data.dct, chemicals.MW),\n chemicals.size\n ),\n self._phase, chemicals,\n False\n )\n return mass", "def total_mass(self):\n del self._total_mass", "def calculate_molecular_mass(symbols):\n\n mass = 0\n for atom in symbols:\n mass += atom_weigths[atom]\n\n return mass", "def RelativisticMass(self):\n return Particle.LorentzFactor(self) * self.restMass", "def calculate_molar_mass(collector):\n avg_temp = collector.get_average_temperature()\n avg_acceleration = collector.get_average_acceleration()\n ground_pressure = collector.get_ground_pressure()\n numerator = 0\n denominator = 0\n for altitude, pressure in\\\n collector.get_iter('altitude', 'pressure'):\n try:\n numerator 
-= (Calculator.R * avg_temp /\n avg_acceleration / altitude *\n math.log(pressure / ground_pressure))\n except ZeroDivisionError:\n pass\n else:\n denominator += 1\n if denominator == 0:\n raise NoDataError('No altitude/pressure to calculate molar mass')\n return numerator / denominator", "def by_mass(self):\n try:\n mass = self._data_cache['mass']\n except:\n chemicals = self.chemicals\n size = chemicals.size\n MW = chemicals.MW\n self._data_cache['mass'] = mass = \\\n MassFlowIndexer.from_data(\n SparseArray.from_rows([\n SparseVector.from_dict(MassFlowDict(i.dct, MW), size)\n for i in self.data\n ]),\n self.phases, chemicals,\n False\n )\n return mass", "def massFraction(self, species):\n k = self.speciesIndex(species)\n return _cantera.phase_massfraction(self._phase_id,k)", "def total_mass_au(self):\n return np.sum(self.atomic_mass)", "def m2(self):\n return self.mass[1]", "def center_of_mass(self, tolerance=1e-9):\n props = GProp_GProps()\n brepgprop_VolumeProperties(self.topods_solid(), props, tolerance)\n com = props.CentreOfMass()\n return geom_utils.gp_to_numpy(com)", "def Momentum(self):\n return (np.multiply(Particle.LorentzFactor(self)\n , np.array(self.velocity,dtype=float))* self.restMass)", "def mass_from_composition(composition):\n mass = 0.0\n for k, v in composition.items():\n if k == 0: # electron\n mass -= v * 5.489e-4\n else:\n mass += v * relative_atomic_masses[k - 1]\n return mass", "def mass(self):\n self.convert_window(\"Mass\", \"kilograms\", [\"Earth masses\", \"Solar masses\", \"carats\", \"cental\", \"decagrams\", \"femtograms\", \"grains\", \"grams\", \"hectograms\", \"hundredweights\", \"kilograms\", \"kilotonnes\", \"megatonnes\", \"micrograms\", \"milligrams\", \"nanograms\", \"ounces(US & UK)\", \"ounces(precious metals)\", \"picograms\", \"pounds(US & UK)\", \"pounds(precious metals)\", \"slugs\", \"stones\", \"tonnes(metric)\", \"tons(UK)\", \"tons(US)\"])", "def getM(self):\r\n return self.M", "def dist_mass(self, Mp):\r\n\r\n Mearth = np.array(Mp, ndmin=1) * u.earthMass\r\n\r\n tmp = ((Mearth >= self.Mprange[0]) & (Mearth <= self.Mprange[1])).astype(float)\r\n Mjup = Mearth.to(\"jupiterMass\").value\r\n\r\n return tmp * Mjup ** (-1.3)", "def calculate_protein_mass(protein: str):\n result = 0\n for p in protein:\n result += monoisotopic_mass_table[p]\n return result", "def get_mc(self) -> int:\n return self.MC", "def mean_by_mass(self, name):\n m = np.asanyarray(self[\"mass\"])\n ret = array.SimArray(\n (self[name].transpose() * m).transpose().mean(axis=0) / m.mean(), self[name].units)\n\n return ret", "def halo_mass(self, index):\n return self.data[self.data[\"hostIndex\"] == index][\n \"particleNumber\"\n ].sum()", "def halo_mass(self, index):\n return self.data[self.data[\"hostIndex\"] == index][\n \"particleNumber\"\n ].sum()", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def calculate_molar_mass_method2(collector):\n avg_temp = collector.get_average_temperature()\n avg_acceleration = collector.get_average_acceleration()\n altitude_list = []\n pressure_list = []\n for altitude, pressure in\\\n collector.get_iter('altitude', 'pressure'):\n altitude_list.append(altitude)\n pressure_list.append(pressure)\n if len(altitude_list) == 0:\n raise NoDataError('No altitude/pressure to calculate molar mass')\n return molar_mass(avg_temp, avg_acceleration,\n altitude_list, pressure_list)", "def cal_mass(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for mass 
routine)')\n\n\n \n if self.E**2-self.px**2-self.py**2-self.pz**2>1e-7: #precision problem\n self.mass=math.sqrt(self.E**2-self.px**2-self.py**2-self.pz**2)\n else:\n self.mass=0", "def calculate_mass(self\n ):\n\n\n # Initialize array of Nan's for mass column of route_df\n full_mass_column = np.zeros(len(self.route_df.index))\n full_mass_column[:] = np.nan\n\n order = np.sort(self.stop_nn_indicies.ravel())\n\n\n for i in range(len(self.mass_array)): \n full_mass_column[order[i]] = self.mass_array[i]\n \n \n # Set initial and value to unloaded bus mass.\n full_mass_column[0] = self.unloaded_bus_mass\n full_mass_column[-1] = self.unloaded_bus_mass\n\n\n for i in range(len(full_mass_column)-1):\n if np.isnan(full_mass_column[i]):\n full_mass_column[i] = full_mass_column[i-1]\n else:\n continue\n\n return full_mass_column", "def totalMass(self, trunc=None):\n if trunc is None:\n trunc = self.trunc\n rVir = self.U.rVir(m, z)\n rS, rhoS, c = self.rS_rhoS_c(m, z)\n # truncation radius over scale radius\n xMax = trunc * rVir/rS\n result = 4./3. * np.pi * rS**3 * rhoS\n result = xMax - np.log(1 + xMax)\n return result", "def totalMass(self, trunc=None):\n if trunc is None:\n trunc = self.trunc\n rVir = self.U.rVir(m, z)\n rS, rhoS, c = self.rS_rhoS_c(m, z)\n # truncation radius over scale radius\n xMax = trunc * rVir/rS\n result = 4./3. * np.pi * rS**3 * rhoS\n result = xMax - np.log(1 + xMax)\n return result", "def test_get_mass(self):\n self.assertEqual(get_element_mass(1), (1.00782503224, 1)) # test input by integer\n self.assertEqual(get_element_mass('Si'), (27.97692653465, 14)) # test string input and most common isotope\n self.assertEqual(get_element_mass('C', 13), (13.00335483507, 6)) # test specific isotope\n self.assertEqual(get_element_mass('Bk'), (247.0703073, 97)) # test a two-element array (no isotope data)", "def mu(self):\n return self.generic_getter(get_chemical_potential, \"mu\", \"convert_energy\")", "def atomWeight(matID):\n mat = goodID(matID)\n compound = xl.CompoundParser(mat)\n mass = 0.0\n for i in range(compound['nElements']):\n mass += xl.AtomicWeight(compound['Elements'][i]) * compound['massFractions'][i]\n return mass", "def ComponentMass(filename,particle_type):\n # read in data from file\n # numpy function will automatically organized labelled columns into\n # an array\n alldata = np.genfromtxt(filename,dtype=None,names=True,skip_header=3)\n\n # save the row indices of all particles of our given type\n indices = np.where(alldata['type'] == particle_type)\n\n # slice an array containing the masses of these particles\n # these values are in units of 10^10 Msun\n masses = alldata['m'][indices]\n\n # calculate the sum of all these masses\n total_mass = np.sum(masses)\n\n # return this number in units of 10^12 Msun, rounded to 3 places\n # this number is already in units of 10^10 Msun\n return np.around(total_mass/1e2,3)", "def get_stir_mass_element(tracer_id, model):\n filepath = paths.stir_filepath(tracer_id, model)\n with open(filepath, 'r') as f:\n line = f.readline()\n mass = float(line.split()[3])\n\n return mass", "def mass_energy():\n c2 = _si.c.value**2\n return Equivalency(\n [\n (si.kg, si.J, lambda x: x * c2, lambda x: x / c2),\n (si.kg / si.m**2, si.J / si.m**2, lambda x: x * c2, lambda x: x / c2),\n (si.kg / si.m**3, si.J / si.m**3, lambda x: x * c2, lambda x: x / c2),\n (si.kg / si.s, si.J / si.s, lambda x: x * c2, lambda x: x / c2),\n ],\n \"mass_energy\",\n )", "def get_M(self):\n return 1.0", "def M(self) -> int:\n return self.params.M", "def 
centre_of_mass(mol):\n\n numatoms = mol.GetNumAtoms()\n conf = mol.GetConformer()\n if not conf.Is3D():\n return 0\n # get coordinate of each atoms\n pts = np.array([list(conf.GetAtomPosition(atmidx)) for atmidx in range(numatoms)])\n atoms = [atom for atom in mol.GetAtoms()]\n mass = Descriptors.MolWt(mol)\n # get center of mass\n center_of_mass = np.array(np.sum(atoms[i].GetMass() * pts[i] for i in range(numatoms))) / mass\n return center_of_mass", "def get_mass(molecular_system, element ='atom', selection = 'all', syntax = 'MolSysMT'):\n\n from molsysmt.basic import get\n from molsysmt.physchem.atoms.mass import physical, units\n\n values=physical\n\n output = []\n if element == 'atom':\n atom_types = get(molecular_system, element=element, selection=selection, syntax=syntax, atom_type=True)\n for ii in atom_types:\n output.append(values[ii.capitalize()])\n elif element in ['group', 'component', 'molecule', 'chain', 'entity']:\n atom_types_in_element = get(molecular_system, element=element, selection=selection,\n syntax=syntaxi, atom_type=True)\n for aux in atom_types_in_element:\n output.append(np.sum([values[ii.capitalize()] for ii in aux]))\n elif element == 'system':\n atom_types_in_element = get(molecular_system, element='atom', selection='all',\n syntax=syntax, atom_type=True)\n output.append(np.sum([values[ii.capitalize()] for ii in atom_types_in_element]))\n\n if element =='system':\n output = output[0]*puw.unit(units)\n else:\n output = puw.quantity(np.array(output), units)\n\n return output", "def get_mc(self) -> int:\r\n return self.mc\r\n raise NotImplementedError", "def area(self):\n geometry_properties = GProp_GProps()\n brepgprop_SurfaceProperties(self.topods_shape(), geometry_properties)\n return geometry_properties.Mass()", "def area(self):\n geometry_properties = GProp_GProps()\n brepgprop_SurfaceProperties(self.topods_shape(), geometry_properties)\n return geometry_properties.Mass()", "def massFractions(self):\n nsp = self._contents.nSpecies()\n y = zeros(nsp,'d')\n for k in range(nsp):\n y[k] = self.massFraction(k)\n return y", "def calculate_radius_mass(collector):\n accel = []\n alti = []\n for accel_val, alti_val in\\\n collector.get_iter('acceleration', 'altitude'):\n accel.append(accel_val)\n alti.append(alti_val)\n if len(alti) == 0:\n raise NoDataError('No altitude data to calculate radius/mass')\n return radius_mass(alti, accel, 1e3, 1e7)", "def volume(self, tolerance=1e-9):\n props = GProp_GProps()\n brepgprop_VolumeProperties(self.topods_solid(), props, tolerance)\n return props.Mass()", "def muon(self) -> CellAtom:\n return self._cell_atoms[self._muon_index]", "def mass_number(isotope):\n\n try:\n isotope = isotope_symbol(isotope)\n mass_numb = Isotopes[isotope][\"mass_number\"]\n except TypeError:\n raise(\"Incorrect type for mass_number input.\")\n except ValueError:\n raise ValueError(\"Mass number not able to be found from input \"\n f\"{isotope}\")\n\n return mass_numb", "def Mass_in_R(self, r):\n return self.int_over_density(r)", "def omega(self, mass: float) -> float:\n return np.sqrt(self.spring_constant / mass)", "def atoms(self, symbol): \n # this is a stub implementation\n #return 10;\n if symbol not in _atomic_mass: raise KeyError( symbol + \" is not in the table\")\n if symbol in _atomic_mass and symbol not in self._gettokens():\n return 0\n #the method is similar to __iter__, just different return\n parse = re.findall(r'([A-Z][a-z]*)(\\d*)|(\\()|(\\))(\\d*)', str(self.dele_mole))\n if symbol in _atomic_mass and symbol in self._gettokens():\n 
sym_num = [collections.Counter()]\n for name, n1, left_open, right_open, n2 in parse:\n if name:\n sym_num[-1][name] += int(n1 or 1) \n if left_open:\n sym_num.append(collections.Counter())\n if right_open:\n top = sym_num.pop()\n for s in top:\n sym_num[-1][s] += top[s] * int(n2 or 1) \n return sym_num[-1][symbol]", "def read_mass(terms, masses):\n # Check that Masses line is correctly formatted\n try:\n assert len(terms) == 2\n atype = int(terms[0])\n assert atype > 0 and atype <= len(masses)\n mass = float(terms[1])\n assert mass > 0\n except:\n raise FileFormatError('Invalid mass term')\n \n if masses[atype - 1] is None:\n masses[atype - 1] = mass\n else:\n raise FileFormatError(f'Multiple masses listed for atom type {atype}')", "def center_of_mass(self, entity, geometric=False):\n\n # Structure, Model, Chain, Residue\n if isinstance(entity, Entity.Entity):\n atom_list = entity.get_atoms()\n # List of Atoms\n elif hasattr(entity, \"__iter__\") and [x for x in entity if x.level == \"A\"]:\n atom_list = entity\n # Some other weirdo object\n else:\n raise ValueError(\n f\"Center of Mass can only be calculated from the following objects:\\n\"\n f\"Structure, Model, Chain, Residue, list of Atoms.\"\n )\n\n masses = []\n positions = [[], [], []] # [ [X1, X2, ..] , [Y1, Y2, ...] , [Z1, Z2, ...] ]\n\n for atom in atom_list:\n masses.append(atom.mass)\n\n for i, coord in enumerate(atom.coord.tolist()):\n positions[i].append(coord)\n\n # If there is a single atom with undefined mass complain loudly.\n if \"ukn\" in set(masses) and not geometric:\n raise ValueError(\n f\"Some atoms don't have an element assigned.\\n\"\n f\"Try adding them manually or calculate the geometrical center of mass instead.\"\n )\n\n if geometric:\n return [sum(coord_list) / len(masses) for coord_list in positions]\n else:\n w_pos = [[], [], []]\n for atom_index, atom_mass in enumerate(masses):\n w_pos[0].append(positions[0][atom_index] * atom_mass)\n w_pos[1].append(positions[1][atom_index] * atom_mass)\n w_pos[2].append(positions[2][atom_index] * atom_mass)\n\n return [sum(coord_list) / sum(masses) for coord_list in w_pos]", "def read_cm(self):\n\n reading = self.read()\n if not reading:\n return None\n\n if reading.unit == \"mm\":\n return reading.value * 10\n if reading.unit == \"in\":\n return reading.value * 2.54\n\n # Unlikely, but future proof.\n raise \"Reading has unknown unit: %s\" % reading.unit", "def get_all_masses(self):\n allMasses = set()\n for interval in self.mz_tree:\n allMasses.add( interval.data[\"mass\"] )\n\n return allMasses", "def richness_to_mass(richness, norm=2.7e13, slope=1.4):\n mass = norm * ((richness / 20.) 
** slope)\n return mass", "def _compute_mass(box_size, evo_config):\n\n # ensure format\n standard_volume = evo_config['individuals']['standard_volume']\n if isinstance(box_size, list):\n if len(box_size) == 1: # sphere\n box_size = box_size[0]\n box_size = np.asarray(box_size)\n\n if np.prod(box_size.shape) < 2: # sphere\n return 4 / 3 * np.pi * box_size**3 / standard_volume\n else: # box\n if np.ndim(box_size) == 1:\n return np.prod(box_size * 2) / standard_volume\n else:\n return np.prod(box_size * 2, axis=1) / standard_volume", "def ion_mass(argument, Z=None, mass_numb=None):\n\n if isinstance(argument, u.Quantity) and Z is None and mass_numb is None:\n\n try:\n m_i = argument.to(u.kg)\n except Exception:\n raise u.UnitConversionError(\"If the ion in given as a Quantity, \"\n \"then it must have units of mass.\")\n\n if np.isclose(m_i.value, const.m_e.value, atol=1e-33): # positrons\n return const.m_e\n elif 1.66e-27 <= m_i.value < 7e-25: # mass range of known isotopes\n return m_i\n else:\n warn(\"The mass that was inputted to ion_mass and is being returned\"\n \" from ion_mass is outside of the range of known isotopes or \"\n \"electrons/ions.\", UserWarning)\n return m_i\n\n if isinstance(argument, str) and \\\n str(argument).lower() in ['e+', 'positron', 'e', 'e-', 'electron']:\n return const.m_e\n\n if argument in ['p', 'p+'] or str(argument).lower() in \\\n ['proton', 'protium'] and Z is None:\n return const.m_p\n elif _is_antiproton(argument) and Z is None:\n return const.m_p\n\n if _is_neutron(argument, mass_numb):\n raise ValueError(\"Use isotope_mass or m_n to get mass of neutron\")\n\n if isinstance(argument, str):\n argument, Z_from_arg = _extract_charge_state(argument)\n else:\n Z_from_arg = None\n\n if atomic_number(argument) == 1:\n if isinstance(argument, str) and 'H-1' in str(argument) and Z is None:\n return const.m_p\n if mass_numb == 1 and Z == 1:\n return const.m_p\n\n if Z is None and Z_from_arg is None:\n Z = 1\n elif Z is not None and Z_from_arg is not None and Z != Z_from_arg:\n raise ValueError(\"Inconsistent charge state information in ion_mass\")\n elif Z is None and Z_from_arg is not None:\n Z = Z_from_arg\n\n if isinstance(Z, str) and Z.isdigit():\n Z = int(Z)\n if isinstance(mass_numb, str) and mass_numb.isdigit():\n mass_numb = int(mass_numb)\n\n if not isinstance(Z, int):\n raise TypeError(\"In ion_mass, Z must be an integer representing the \"\n \"ionization state (e.g., Z=1 for singly ionized).\")\n\n if not isinstance(mass_numb, int) and mass_numb is not None:\n raise TypeError(\"In ion_mass, mass_numb must be an integer \"\n \"representing the mass number of an isotope.\")\n\n if atomic_number(argument) < Z:\n raise ValueError(\"The ionization state cannot exceed the \"\n \"atomic number in ion_mass\")\n\n try:\n isotope = isotope_symbol(argument, mass_numb)\n except Exception:\n is_isotope = False\n else:\n is_isotope = True\n\n if is_isotope:\n\n if isotope == 'D' and Z == 1:\n return 3.343583719e-27 * u.kg\n elif isotope == 'T' and Z == 1:\n return 5.007356665e-27 * u.kg\n\n atomic_mass = isotope_mass(isotope)\n\n else:\n\n try:\n atomic_mass = standard_atomic_weight(argument)\n except Exception: # coveralls: ignore\n\n errormessage = (\"No isotope mass or standard atomic weight is \"\n f\"available to get ion mass for {argument}\")\n\n if isinstance(mass_numb, int):\n errormessage += f\" with mass number {mass_numb}\"\n\n raise ValueError(errormessage)\n\n m_i = (atomic_mass - Z * const.m_e).to(u.kg)\n\n return m_i", "def isotope_mass(argument, 
mass_numb=None):\n\n argument, charge_state = _extract_charge_state(argument)\n\n if charge_state is not None and charge_state != 0:\n raise ValueError(\"Use ion_mass instead of isotope_mass for masses of \"\n \"charged particles\")\n\n try:\n isotope = isotope_symbol(argument, mass_numb)\n atomic_mass = Isotopes[isotope]['atomic_mass']\n except ValueError:\n raise ValueError(\"Unable to identify isotope in isotope_mass\")\n except TypeError:\n raise TypeError(\"Invalid input to isotope_mass\")\n\n return atomic_mass", "def _reduced_mass(structure) -> float:\n reduced_comp = structure.composition.reduced_composition\n num_elems = len(reduced_comp.elements)\n elem_dict = reduced_comp.get_el_amt_dict()\n\n denominator = (num_elems - 1) * reduced_comp.num_atoms\n\n all_pairs = combinations(elem_dict.items(), 2)\n mass_sum = 0\n\n for pair in all_pairs:\n m_i = Composition(pair[0][0]).weight\n m_j = Composition(pair[1][0]).weight\n alpha_i = pair[0][1]\n alpha_j = pair[1][1]\n\n mass_sum += (alpha_i + alpha_j) * (m_i * m_j) / (m_i + m_j) # type: ignore\n\n reduced_mass = (1 / denominator) * mass_sum\n\n return reduced_mass", "def distmeter(self):\n return self._distance.to(\"m\").value", "def test_atomic_masses():\n first = get_atomic_mass(\"As\")\n assert first == 74.9216\n \n second = get_atomic_mass(\"Be\")\n assert second == 9.012182\n\n third = get_atomic_mass(\"Li\")\n assert third == 6.941" ]
[ "0.8615101", "0.8615101", "0.8449791", "0.827885", "0.81125116", "0.7995724", "0.7925729", "0.7887645", "0.78556985", "0.78556985", "0.7763608", "0.76362807", "0.7610759", "0.7542108", "0.735307", "0.7303788", "0.7278605", "0.7205126", "0.7170194", "0.7108265", "0.7056702", "0.69589114", "0.69333863", "0.69306815", "0.6919366", "0.6872219", "0.68660146", "0.6797525", "0.67918766", "0.6771746", "0.6751246", "0.6727704", "0.6710783", "0.6619598", "0.6570514", "0.65604174", "0.6557276", "0.6536014", "0.65349454", "0.6527816", "0.6526131", "0.65237904", "0.64991987", "0.64798653", "0.6452518", "0.6418176", "0.64169306", "0.6402263", "0.63874084", "0.63685876", "0.6365871", "0.63424414", "0.6340516", "0.6324552", "0.62780243", "0.62523067", "0.6227903", "0.62028265", "0.61938363", "0.61825734", "0.6180295", "0.6180295", "0.6159335", "0.61575645", "0.6126973", "0.61095095", "0.60978234", "0.60978234", "0.60720575", "0.6040872", "0.6036657", "0.6031808", "0.60204947", "0.59676623", "0.5966555", "0.59645003", "0.59396684", "0.5931354", "0.5902083", "0.5894005", "0.5894005", "0.5873519", "0.5847009", "0.5846695", "0.5839526", "0.5838022", "0.58142644", "0.58035725", "0.5778425", "0.5773139", "0.5761484", "0.5748471", "0.57483006", "0.5747362", "0.57450587", "0.5724186", "0.5718342", "0.5717593", "0.57037795", "0.570105" ]
0.8783727
0
Gets the radius of this atom. Returns
def get_radius(self): return self.radius
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def getRadius(self):\n return self.__radius", "def radius(self) -> float:\n return self._radius", "def getRadius(self):\n return self.radius", "def get_radius(self):\n return self.r", "def get_radius(self):\n return self.__radius", "def get_radius(self):\n return self.R", "def radius(self) -> float:\n return get_radius_from_element(self.element)", "def radius(self) -> Union[int, float]:\n return self.proto.radius", "def radius(self):\n return sqrt(self.radius_square())", "def radius(self):\n if self._radius is None:\n translated_xyz = translate_to_center_of_mass(self.get_xyz())\n _, symbols, x, y, z = get_xyz_matrix(translated_xyz)\n border_elements = list() # a list of the farthest element/s\n r = 0\n for si, xi, yi, zi in zip(symbols, x, y, z):\n ri = xi ** 2 + yi ** 2 + zi ** 2\n if ri == r:\n border_elements.append(si)\n elif ri > r:\n r = ri\n border_elements = [si]\n atom_r = max([get_atom_radius(si) if get_atom_radius(si) is not None else 1.50 for si in border_elements])\n self._radius = r ** 0.5 + atom_r\n logger.info('Determined a radius of {0:.2f} Angstrom for {1}'.format(self._radius, self.label))\n return self._radius", "def get_radius(self):\r\n return self._handler.get_radius()", "def get_radius(self):\n if self.no_dist is False:\n dist = self.distance\n radius = (dist * self.ang_size / 60. *\n np.pi/180. * ct._kpc_over_pc_)/2.\n self.radius = radius\n else:\n self.radius = -1 # use -1 to indicate unknown diameter\n\n return self.radius", "def radius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, self.vertices[0])", "def radius(self) -> float:\n return math.hypot(self.x, self.y)", "def radius(self) -> int:\n pass", "def inner_radius(self):\n return self._inner_radius", "def outer_radius(self):\n return self._outer_radius", "def getCoreRadius(self):\n return self.getNumRings(indexBased=True) * self.getFirstBlock().getPitch()", "def radius(self):\n if self._radius is None:\n self._radius = self.stem / 2\n if self._radius * 2 > self.stem:\n raise Exception('Invalid radius. 
Maximum radius = 2 * stem.')\n return self._radius", "def polar_radius(self):\n return self.r * (1 - self.f)", "def get_radius(self):\r\n return 1", "def mean_radius(self):\n return self._mean_radius", "def radius_square(self):\n try: \n return self._radius_2\n except AttributeError:\n center = self.center()\n self._radius_2 = max( (v.vector() - center).dot_product(\n v.vector() - center) for v in\n self.vertex_generator() )\n return self._radius_2", "def circumference(self):\n return (2 * math.pi * self.__radius)", "def circumference(self):\n return math.pi * self.radius * 2", "def get_radius(self):", "def circle_radius(self):\n return min([self.container.width, self.container.height]) / 4", "def eggleton_roche_radius(self):\n return self.eggleton_roche_over_separation() * self.separation()", "def getRadius(self):\r\n if len(self._indices)==0:\r\n return 0\r\n big=Cluster.distance(self,self._dataset.getPoint(self._indices[0]))\r\n for i in range (len(self._indices)):\r\n dist=Cluster.distance(self,self._dataset.getPoint(self._indices[i]))\r\n if (dist>big):\r\n big=dist\r\n return big", "def estimate_radius(self):\n red = self.T[:,:,0] # empirically, the most reliable channel\n\n eye_radius = red.sum(axis=1).max() / 2\n return eye_radius", "def scatteringRadius(self):\n\n return self.__scatteringRadius", "def radius(self):\n c = self.centroid()\n dmax = -np.inf\n for vertex in self.path.vertices:\n d = np.linalg.norm(vertex - c)\n if d > dmax:\n dmax = d\n return d", "def getPointRadius(self):\n l = [point.radius for point in self.points]\n if l.count(l[0]) == len(l):\n return l[0]\n else:\n raise ValueError(\"The radiuses of the points must be the same otherwise it makes no sense.\")", "def getScatteringRadius(self):\n\n return self.scatteringRadius", "def inradius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, cast(Segment, self.edges[0]).midpoint)", "def diameter(self):\n return self.radius * 2", "def diameter(self):\n return self.radius * 2", "def Radius(self, *args):\n return _Bnd.Bnd_Sphere_Radius(self, *args)", "def get_mean_radius(self):\n\n radius = np.array(self.coord_list)\n radius[:, 0] -= self.mean_pos[0]\n radius[:, 1] -= self.mean_pos[1]\n radius = np.sqrt(np.sum(radius ** 2, axis=1))\n mean_radius = np.mean(radius)\n return mean_radius", "def diameter(self):\n return 2 * self.radius", "def getPerimeter(self):\n return 2 * math.pi * self.__radius", "def circle_area(self):\n return np.pi * self.ring_radius ** 2", "def getArea(self):\n return math.pi * self.__radius * self.__radius", "def perimeter(self):\r\n\r\n return 2*math.pi*self.__radius", "def roundingRadius( self ):\n return self._roundingRadius", "def getArea(self):\n return math.pi * self.radius ** 2", "def get_radius():\n function = LegacyFunctionSpecification()\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN,\n description = \"Index of the particle to get the radius of. 
This index must have been returned by an earlier call to :meth:`new_particle`\")\n function.addParameter('radius', dtype='float64', direction=function.OUT, description = \"The current radius of the particle\")\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n #function.can_handle_array = True\n function.must_handle_array = True\n return function", "def mean_radius(self):\n return (self.semimajor_axis + self.semimedium_axis + self.semiminor_axis) / 3", "def area(self):\n return (self.__radius ** 2 * math.pi)", "def get_circle_radius(self, point, center):\n x, y, z = point[:]\n x0, y0, z0 = center[:]\n return math.sqrt((x-x0)**2 + (y-y0)**2 + (z-z0)**2)", "def area(self):\n return self.radius*self.radius*math.pi", "def get_receptive_field_radius(self):\n raise NotImplementedError()", "def getS(self):\n\t\tsValue = math.sqrt((math.pow(self.x,2)) + (math.pow(self.y,2)))/self.radius\n\t\treturn sValue", "def area(self):\r\n return math.pi*(self.__radius**2)", "def area(self):\n return math.pi*self._radius*self._radius", "def radius(self, value):\n self._radius = value", "def area(self):\n return math.pi * self.radius ** 2", "def area(self):\n return math.pi * self.radius ** 2", "def get_radius(center, rad, speed_limit):\n i = Intersection(center, rad, speed_limit)\n return i.get_radius()", "def getSphereRadius(self):\n return 1.5", "def area(self):\n return math.pi * math.pow(self.radius, 2)", "def circumference(self):\n return self.width + self.height", "def outer_rad(self):\n return self._outer_rad", "def get_van_Der_Waals_radius(self):\n return self.van_Der_Waals_radius", "def area(self):\n\t\t#print (self.radius*self.radius*math.pi)\n\t\tcircle_area = (self.radius*self.radius*math.pi)\n\t\treturn circle_area", "def inner_rad(self) -> Quantity:\n return self._inner_rad", "def radius(self,xc=None,yc=None):\n if xc == None:\n xc = self.x1\n if yc == None:\n yc = self.y1\n self.r = sqrt((self.x-xc)**2+(self.y-yc)**2)", "def __get_radius_of_curvature(self):\n t = self.t\n xp = np.cos((np.pi * t ** 2) / 2.0) # First derivative of x(t) (FresnelC)\n yp = np.sin((np.pi * t ** 2) / 2.0) # First derivative of y(t) (FresnelS)\n xpp = -np.pi * t * np.sin((np.pi * t ** 2) / 2.0) # Second derivative of x(t)\n ypp = np.pi * t * np.cos((np.pi * t ** 2) / 2.0) # Second derivative of y(t)\n return abs(\n ((xp ** 2 + yp ** 2) ** (3 / 2)) / (xp * ypp - yp * xpp)\n ) # Radius of curvature: https://en.wikipedia.org/wiki/Radius_of_curvature", "def r(self):\n\n r = np.sqrt(self.pos[:,0]**2 + self.pos[:,1]**2 + self.pos[:,2]**2)\n\n return r", "def radii(self) -> Quantity:\n return self._radii", "def get_radius(size):\n return (size * 10) - 5", "def hardSphereRadius(self):\n\n return self.__hardSphereRadius", "def Truncated_radius(self):\n r_trunc = fminbound(self.Mass_diff_005, -10., np.log10(self.scale_radius))\n return 10**float(r_trunc)", "def get_radius_of_curvature(self):\n t = self.t\n # Returns the radius of curvature for a normalized Euler curve at a position t\n xp = np.cos((np.pi * t ** 2) / 2.0) # First derivative of x(t) (FresnelC)\n yp = np.sin((np.pi * t ** 2) / 2.0) # First derivative of y(t) (FresnelS)\n xpp = -np.pi * t * np.sin((np.pi * t ** 2) / 2.0) # Second derivative of x(t)\n ypp = np.pi * t * np.cos((np.pi * t ** 2) / 2.0) # Second derivative of y(t)\n return self.scale_factor * abs(\n ((xp ** 2 + yp ** 2) ** (3 / 2)) / (xp * ypp - yp * xpp)\n ) # Radius of curvature: https://en.wikipedia.org/wiki/Radius_of_curvature", "def 
getRadius(animation, jointlock, frame, pointpos, handthick = 3.5):\n# TODO: testar esse caso!!!\n p1 = jointlock.getPosition(frame)\n p2 = jointlock.getChildren(0).getPosition(frame)\n p0 = np.asarray(pointpos)\n p0p1 = p0-p1\n p0p2 = p0-p2\n p2p1 = p2-p1\n d = np.linalg.norm(np.cross(p0p1,p0p2))/np.linalg.norm(p2p1)\n if handthick:\n d = d-3.5\n return d", "def _arc_radius(height_in_units):\n return height_in_units * _ARC_HEIGHT_UNIT / (1 - math.cos(_ANGLE))", "def get_char_radius(self, zmin=None, zmax=None, ratio=0.5):\n if zmin != None:\n if zmax == None: zmax = zmin + 10.\n ind = (self.z >= zmin)*(self.z <= zmax)\n xin = self.x[ind]; yin = self.y[ind]; zin = self.z[ind]\n self.z0 = (zmin+zmax)/2\n else:\n xin = self.x.copy(); yin = self.y.copy(); zin = self.z.copy()\n rArr = np.arange(400.)*2.+2.\n Nt = xin.size\n for r in rArr:\n if (xin[(xin**2+yin**2)<=r**2]).size > Nt*ratio:\n try:\n print 'z0 =',self.z0,' nm 1 sigma radius = ',r,' nm', ' Nin = ', (xin[(xin**2+yin**2)<=r**2]).size, 'Ntotal =', Nt\n except:\n print '1 sigma radius = ',r,' nm', ' Nin = ', (xin[(xin**2+yin**2)<=r**2]).size, 'Ntotal =', Nt\n break\n self.cr = r\n return", "def dist_radius(self, Rp):\r\n\r\n return self.logunif(Rp, self.Rprange.to(\"earthRad\").value)", "def signatureRadius(self):\n return self._getAttribute(Attribute.signatureRadius)", "def area_of_circle(radius):\n return radius", "def get_receptive_field_radius(self) -> int:\n\n receptive_field_radius = 0\n for feature_group in self.features_group_list:\n receptive_field_radius = max(\n receptive_field_radius, feature_group.receptive_field_radius\n )\n return receptive_field_radius", "def get_roi_circle(self):\n return self.circle_list", "def Rcoords(self):\n if self.radial > 0 and self.radial < len(self.ThRZmesh.getPositions(label=\"R\")):\n R = (self.radialInner() + self.radialOuter()) / 2.0\n else:\n # n = 0\n runLog.warning(\n \"Error: Radial Index ({}) location not INSIDE mesh \".format(self.radial)\n )\n runLog.warning(self.ThRZmesh.getPositions(label=\"R\"))\n R = None\n return R", "def parallel_radius(self, lat):\n\n return EARTH_RADIUS * lat.cos()", "def _get_radial(self):\n return self.startRadius is not None and self.endRadius is not None", "def value_circle(self):\r\n return self.circle", "def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n return self.getDimension(\"od\")", "def radians(self) -> float:\n return math.atan2(self.y, self.x)", "def get_length(self):\n return math.sqrt(self.x**2 + self.y**2)", "def get_interaction_length(self):\n return self.radius + 2.0 #in um", "def test_get_radius():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_rad = get_radius(center, radius, 30)\n\n assert returned_rad == radius\n assert returned_rad != center.get_x()\n assert returned_rad != center.get_y()", "def geocentric_radius(self, longitude, latitude, longitude_semimajor_axis=0.0):\n latitude_rad = np.radians(latitude)\n longitude_rad = np.radians(longitude)\n longitude_semimajor_axis_rad = np.radians(longitude_semimajor_axis)\n\n coslat, sinlat = np.cos(latitude_rad), np.sin(latitude_rad)\n\n fc = self.meridional_flattening\n fb = self.equatorial_flattening\n\n radius = (self.semimajor_axis * (1.0 - fc) * (1.0 - fb)) / np.sqrt(\n 1.0\n - (2.0 * fc - fc**2) * coslat**2\n - (2.0 * fb - fb**2) * sinlat**2\n - (1.0 - fc) ** 2\n * (2.0 * fb - fb**2)\n * coslat**2\n * np.cos(longitude_rad - longitude_semimajor_axis_rad) ** 2\n )\n return radius", "def get_radiation():\n sun_pos = get_sun_position()\n if sun_pos <= POSITION_MIN 
or sun_pos >= POSITION_MAX:\n return 0\n else:\n # Calculate a new delta.\n delta = random.randint(0, RADIATION_DELTA)\n if random.random() > 0.5:\n delta = -1 * delta\n # Calculate the radiation based on the sun position.\n new_radiation = round(-0.1279 * pow(sun_pos, 2) + 46.05 * sun_pos - 3100)\n # Apply the delta and return the value.\n return new_radiation + delta", "def getHardSphereRadius(self):\n\n if self.__hardSphereRadius is not None:\n return self.__hardSphereRadius\n return self.__scatteringRadius" ]
[ "0.87892956", "0.87892956", "0.87892956", "0.87892956", "0.87892956", "0.87633866", "0.87077355", "0.8659776", "0.8640172", "0.86317974", "0.85356", "0.8518339", "0.847131", "0.83863753", "0.83786553", "0.8247246", "0.8189835", "0.8085827", "0.79818517", "0.7850247", "0.77838916", "0.77112305", "0.76140416", "0.7551594", "0.7501205", "0.7424428", "0.7406297", "0.73446", "0.73412675", "0.71715975", "0.71413034", "0.7127234", "0.7091055", "0.7067318", "0.70670843", "0.7048208", "0.702793", "0.69927424", "0.6983567", "0.6944307", "0.6899333", "0.6899333", "0.6892828", "0.6849406", "0.6834126", "0.68217653", "0.67994505", "0.67749035", "0.6756584", "0.6734795", "0.6731847", "0.6702752", "0.6702462", "0.6677404", "0.6658825", "0.6644936", "0.66317093", "0.6548208", "0.65237", "0.65055263", "0.6500134", "0.64933133", "0.64933133", "0.64896065", "0.648937", "0.6451629", "0.64309865", "0.63918436", "0.6362276", "0.63362217", "0.6330703", "0.63299686", "0.63150144", "0.6298747", "0.6290215", "0.62896043", "0.6267939", "0.6261382", "0.6237756", "0.6231923", "0.62272674", "0.62258273", "0.6216444", "0.62163514", "0.6199266", "0.6197388", "0.6178715", "0.61710227", "0.61522037", "0.6094762", "0.60867965", "0.6068591", "0.60645527", "0.60571706", "0.60558", "0.6052979", "0.6047821", "0.6044768", "0.6039032" ]
0.85236245
12
Gets the van Der Waals radius of this atom. Returns
def get_van_Der_Waals_radius(self): return self.van_Der_Waals_radius
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_radius(self):\n return self.r", "def get_radius(self):\n return self.R", "def get_radius(self):\n return self.__radius", "def radius(self) -> float:\n return get_radius_from_element(self.element)", "def get_radius(self):\n return self.radius", "def get_radius(self):\n return self.radius", "def radius(self):\n return sqrt(self.radius_square())", "def outer_radius(self):\n return self._outer_radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def radius(self) -> float:\n return self._radius", "def get_radius(self):\n if self.no_dist is False:\n dist = self.distance\n radius = (dist * self.ang_size / 60. *\n np.pi/180. * ct._kpc_over_pc_)/2.\n self.radius = radius\n else:\n self.radius = -1 # use -1 to indicate unknown diameter\n\n return self.radius", "def getRadius(self):\n return self.__radius", "def getRadius(self):\n return self.radius", "def radius(self):\n if self._radius is None:\n translated_xyz = translate_to_center_of_mass(self.get_xyz())\n _, symbols, x, y, z = get_xyz_matrix(translated_xyz)\n border_elements = list() # a list of the farthest element/s\n r = 0\n for si, xi, yi, zi in zip(symbols, x, y, z):\n ri = xi ** 2 + yi ** 2 + zi ** 2\n if ri == r:\n border_elements.append(si)\n elif ri > r:\n r = ri\n border_elements = [si]\n atom_r = max([get_atom_radius(si) if get_atom_radius(si) is not None else 1.50 for si in border_elements])\n self._radius = r ** 0.5 + atom_r\n logger.info('Determined a radius of {0:.2f} Angstrom for {1}'.format(self._radius, self.label))\n return self._radius", "def inner_radius(self):\n return self._inner_radius", "def get_radius(self):\r\n return self._handler.get_radius()", "def radius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, self.vertices[0])", "def radius(self) -> Union[int, float]:\n return self.proto.radius", "def radius(self) -> float:\n return math.hypot(self.x, self.y)", "def eggleton_roche_radius(self):\n return self.eggleton_roche_over_separation() * self.separation()", "def get_radius(self):\r\n return 1", "def radius(self):\n c = self.centroid()\n dmax = -np.inf\n for vertex in self.path.vertices:\n d = np.linalg.norm(vertex - c)\n if d > dmax:\n dmax = d\n return d", "def radius(self) -> int:\n pass", "def getCoreRadius(self):\n return self.getNumRings(indexBased=True) * self.getFirstBlock().getPitch()", "def inradius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, cast(Segment, self.edges[0]).midpoint)", "def mean_radius(self):\n return self._mean_radius", "def polar_radius(self):\n return self.r * (1 - self.f)", "def get_radius(self):", "def circumference(self):\n return (2 * math.pi * self.__radius)", "def radius_square(self):\n try: \n return self._radius_2\n except AttributeError:\n center = self.center()\n self._radius_2 = max( (v.vector() - center).dot_product(\n v.vector() - center) for v in\n self.vertex_generator() )\n return self._radius_2", "def circumference(self):\n return math.pi * self.radius * 2", "def outer_rad(self):\n return self._outer_rad", "def diameter(self):\n return self.radius * 2", "def diameter(self):\n return self.radius * 2", "def diameter(self):\n return 2 * self.radius", "def estimate_radius(self):\n red = self.T[:,:,0] # empirically, the most reliable channel\n\n eye_radius = red.sum(axis=1).max() / 2\n return eye_radius", "def ee_radius(self, energy=FIRST_AIRY_ENCIRCLED):\n k, v = 
list(self._ee.keys()), list(self._ee.values())\n if energy in v:\n idx = v.index(energy)\n return k[idx]\n\n def optfcn(x):\n return (self.encircled_energy(x) - energy) ** 2\n\n # golden seems to perform best in presence of shallow local minima as in\n # the encircled energy\n return optimize.golden(optfcn)", "def hardSphereRadius(self):\n\n return self.__hardSphereRadius", "def inner_rad(self) -> Quantity:\n return self._inner_rad", "def getSphereRadius(self):\n return 1.5", "def radius(self):\n if self._radius is None:\n self._radius = self.stem / 2\n if self._radius * 2 > self.stem:\n raise Exception('Invalid radius. Maximum radius = 2 * stem.')\n return self._radius", "def circle_radius(self):\n return min([self.container.width, self.container.height]) / 4", "def getRadius(self):\r\n if len(self._indices)==0:\r\n return 0\r\n big=Cluster.distance(self,self._dataset.getPoint(self._indices[0]))\r\n for i in range (len(self._indices)):\r\n dist=Cluster.distance(self,self._dataset.getPoint(self._indices[i]))\r\n if (dist>big):\r\n big=dist\r\n return big", "def mean_radius(self):\n return (self.semimajor_axis + self.semimedium_axis + self.semiminor_axis) / 3", "def getArea(self):\n return math.pi * self.radius ** 2", "def effective_radius(self, n):\n\n er2 = 5.0 * self.sa / n\n er = np.sqrt(er2)\n\n return er", "def getArea(self):\n return math.pi * self.__radius * self.__radius", "def getS(self):\n\t\tsValue = math.sqrt((math.pow(self.x,2)) + (math.pow(self.y,2)))/self.radius\n\t\treturn sValue", "def ee_radius_diffraction(self, energy=FIRST_AIRY_ENCIRCLED):\n return _inverse_analytic_encircled_energy(self.fno, self.wavelength, energy)", "def get_receptive_field_radius(self):\n raise NotImplementedError()", "def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n return self.getDimension(\"od\")", "def area(self):\n return (self.__radius ** 2 * math.pi)", "def roundingRadius( self ):\n return self._roundingRadius", "def area(self):\n return self.radius*self.radius*math.pi", "def get_radiation():\n sun_pos = get_sun_position()\n if sun_pos <= POSITION_MIN or sun_pos >= POSITION_MAX:\n return 0\n else:\n # Calculate a new delta.\n delta = random.randint(0, RADIATION_DELTA)\n if random.random() > 0.5:\n delta = -1 * delta\n # Calculate the radiation based on the sun position.\n new_radiation = round(-0.1279 * pow(sun_pos, 2) + 46.05 * sun_pos - 3100)\n # Apply the delta and return the value.\n return new_radiation + delta", "def perimeter(self):\r\n\r\n return 2*math.pi*self.__radius", "def circle_area(self):\n return np.pi * self.ring_radius ** 2", "def area(self):\r\n return math.pi*(self.__radius**2)", "def area(self):\n return math.pi * math.pow(self.radius, 2)", "def Truncated_radius(self):\n r_trunc = fminbound(self.Mass_diff_005, -10., np.log10(self.scale_radius))\n return 10**float(r_trunc)", "def area(self):\n return math.pi * self.radius ** 2", "def area(self):\n return math.pi * self.radius ** 2", "def get_radius():\n function = LegacyFunctionSpecification()\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN,\n description = \"Index of the particle to get the radius of. 
This index must have been returned by an earlier call to :meth:`new_particle`\")\n function.addParameter('radius', dtype='float64', direction=function.OUT, description = \"The current radius of the particle\")\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n #function.can_handle_array = True\n function.must_handle_array = True\n return function", "def getPerimeter(self):\n return 2 * math.pi * self.__radius", "def get_radius(size):\n return (size * 10) - 5", "def get_diameter(self) -> float:\r\n \r\n return (self.box[3] - self.box[1] + self.box[2] - self.box[0]) / 2", "def Radius(self, *args):\n return _Bnd.Bnd_Sphere_Radius(self, *args)", "def dist_radius(self, Rp):\r\n\r\n return self.logunif(Rp, self.Rprange.to(\"earthRad\").value)", "def sqrt(self):\n\n\t\t# Maintain state of self and create new trace variable new_var\n\t\tnew_var = Var(self.val, self.der)\n\t\treturn new_var.__pow__(0.5)", "def getRadius(animation, jointlock, frame, pointpos, handthick = 3.5):\n# TODO: testar esse caso!!!\n p1 = jointlock.getPosition(frame)\n p2 = jointlock.getChildren(0).getPosition(frame)\n p0 = np.asarray(pointpos)\n p0p1 = p0-p1\n p0p2 = p0-p2\n p2p1 = p2-p1\n d = np.linalg.norm(np.cross(p0p1,p0p2))/np.linalg.norm(p2p1)\n if handthick:\n d = d-3.5\n return d", "def get_mean_radius(self):\n\n radius = np.array(self.coord_list)\n radius[:, 0] -= self.mean_pos[0]\n radius[:, 1] -= self.mean_pos[1]\n radius = np.sqrt(np.sum(radius ** 2, axis=1))\n mean_radius = np.mean(radius)\n return mean_radius", "def area(self):\n return math.pi*self._radius*self._radius", "def _earth_distance(time='now'):\n return get_earth(time).radius", "def radial6(self) -> float:\n return self.distortion_coefficients[2]", "def scatteringRadius(self):\n\n return self.__scatteringRadius", "def radial4(self) -> float:\n return self.distortion_coefficients[1]", "def getHardSphereRadius(self):\n\n if self.__hardSphereRadius is not None:\n return self.__hardSphereRadius\n return self.__scatteringRadius", "def _arc_radius(height_in_units):\n return height_in_units * _ARC_HEIGHT_UNIT / (1 - math.cos(_ANGLE))", "def __get_radius_of_curvature(self):\n t = self.t\n xp = np.cos((np.pi * t ** 2) / 2.0) # First derivative of x(t) (FresnelC)\n yp = np.sin((np.pi * t ** 2) / 2.0) # First derivative of y(t) (FresnelS)\n xpp = -np.pi * t * np.sin((np.pi * t ** 2) / 2.0) # Second derivative of x(t)\n ypp = np.pi * t * np.cos((np.pi * t ** 2) / 2.0) # Second derivative of y(t)\n return abs(\n ((xp ** 2 + yp ** 2) ** (3 / 2)) / (xp * ypp - yp * xpp)\n ) # Radius of curvature: https://en.wikipedia.org/wiki/Radius_of_curvature", "def volume(self) -> float:\n return 4 / 3 * np.pi * self.radius**3", "def semidiameter(radius, distance):\n\n return np.arcsin(radius / distance)", "def getScatteringRadius(self):\n\n return self.scatteringRadius", "def get_w_star(self):\n from .. import physics as phys\n return phys.w_star(self)", "def get_w_star(self):\n from .. import physics as phys\n return phys.w_star(self)", "def get_w_star(self):\n from .. import physics as phys\n return phys.w_star(self)", "def get_w_star(self):\n from .. 
import physics as phys\n return phys.w_star(self)", "def get_front_wheel_radius():\n\n # I used the three notches located around the rim of the wheel as reference points.\n # I selected one of the notches arbitrarily and put a piece of tape over it to identify it clearly.\n # Orienting this notch straight down, I played with drive_straight until I got that notch to complete\n # exactly one full rotation. This ended up being approximately 86 mm.\n # Since 86 mm is the circumference of the wheel, the radius will be 86/(2*Pi) mm\n return 86 / (2 * math.pi)", "def radialOuter(self):\n if self.radial in range(1, len(self.ThRZmesh.getPositions(label=\"R\"))):\n R = self.ThRZmesh.getUpper(label=\"R\", n=(self.radial))\n else:\n runLog.warning(\n \"Error: Radial Index ({0}) location not INSIDE mesh \".format(\n self.radial\n )\n )\n runLog.warning(self.ThRZmesh.getPositions(label=\"R\"))\n R = None\n return R", "def get_radius(center, rad, speed_limit):\n i = Intersection(center, rad, speed_limit)\n return i.get_radius()", "def rad(area) :\n return sqrt(area/pi)", "def compute_thickness(self):\n com = vtk.vtkCenterOfMass()\n com.SetInputData(self.inner_rim_poly)\n center = np.asarray(com.GetCenter()) # take center from inner points (not outer)\n\n irp_numpy = numpy_support.vtk_to_numpy(self.inner_rim_poly.GetPoints().GetData())\n orp_numpy = numpy_support.vtk_to_numpy(self.outer_rim_poly.GetPoints().GetData())\n\n # compute average radius ..\n rs_inner = np.linalg.norm(irp_numpy - np.tile(center, (irp_numpy.shape[0], 1)), axis = 1)\n rs_outer = np.linalg.norm(orp_numpy - np.tile(center, (orp_numpy.shape[0], 1)), axis = 1)\n\n # average out\n r_inner = np.mean(rs_inner)\n r_outer = np.mean(rs_outer)\n\n # compute distance\n d = r_outer - r_inner\n self.thickness = d\n\n return d", "def get_radial_distance_from(self, star):\n # T = (4pi^2r^3/(Gm1))^1/2\n # r = ((GmT^2)/(4pi^2))^1/3\n G = 6.67408e-11\n numerator = G*star.mass*pow(self.T, 2)\n denominator = 4*pow(math.pi, 2)\n return pow(numerator/denominator, 1/3)", "def get_front_wheel_radius():\n\t# ####\n\t# TODO: Empirically determine the radius of the robot's front wheel using the\n\t# cozmo_drive_straight() function. You can write a separate script for doing \n\t# experiments to determine the radius. This function should return the radius\n\t# in millimeters. Write a comment that explains how you determined it and any\n\t# computation you do as part of this function.\n\t# ####\n\n\t# Kept driving over and over, changing the distance/speed to see how far one rotation is (see get_wheel_radius_test.py)\n\t# One rotation is 2pi * radius distance, I empircally found that a rotation goes about 83.73mm.\n\t# I used a piece of paper stuck in the wheel to visually notice when a rotation had completed.\n\treturn 83.73 / (2 * math.pi)", "def Full_Extension(self, dist):\n return radtodeg * np.arctan(self.max_radius / dist)", "def circumference(self):\n return self.width + self.height", "def find_radius(mass,delta_m,eta,xi,mue,pp_factor):\n\n #range of radii; reason in detail under step 9 of report\n r_low = 0.01*Rsun # MKS\n r_high = 3*Rsun # MKS\n \n radius = brentq(lum_difference, r_low, r_high, xtol=1.0e-4, args = (mass,delta_m,eta,xi,mue,pp_factor))\n return radius" ]
[ "0.79551435", "0.78757066", "0.78437364", "0.7793091", "0.7788298", "0.7788298", "0.7774703", "0.7747598", "0.77394575", "0.77394575", "0.77394575", "0.77394575", "0.77394575", "0.7731752", "0.7719796", "0.7713599", "0.7675112", "0.7668061", "0.7660242", "0.7640078", "0.75554675", "0.74558413", "0.7382431", "0.73580617", "0.72847605", "0.7203548", "0.6982298", "0.6974618", "0.69417745", "0.6918043", "0.6906015", "0.6888268", "0.6862401", "0.68405426", "0.6796665", "0.6784048", "0.6751167", "0.6751167", "0.674017", "0.669276", "0.65959805", "0.6589323", "0.65809155", "0.65615606", "0.6547796", "0.654609", "0.65427583", "0.6514594", "0.6464349", "0.6460805", "0.6443688", "0.642759", "0.6401689", "0.6397791", "0.6397644", "0.6359754", "0.63489246", "0.63370144", "0.6336557", "0.63207346", "0.6305495", "0.6283078", "0.6277707", "0.6274113", "0.6262389", "0.6262389", "0.6257925", "0.6246409", "0.62461627", "0.6239753", "0.6232488", "0.62122416", "0.62116146", "0.6179611", "0.6178033", "0.6177507", "0.61660576", "0.6163751", "0.61539835", "0.6147829", "0.6138119", "0.6127553", "0.6126464", "0.6110893", "0.61044186", "0.610096", "0.60923135", "0.60923135", "0.60923135", "0.60923135", "0.6080058", "0.6063737", "0.60534406", "0.60207915", "0.6020524", "0.5953861", "0.5950649", "0.5950043", "0.5949667", "0.594402" ]
0.86208606
0
Gets the euler tensor of this atom. Returns
def get_euler(self): return array([ coord * self.coords for coord in self.coords ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def imu_get_euler(self):\n return self.imu.get_euler()", "def euler_integrator(self, t, y, tau):\n\n return self.plant.rhs(t, y, tau)", "def getTensor(self):\n\t\treturn self.cur_tensor", "def get_deltaE(self):\n return self.deltaE", "def e(self):\n return np.matrix([self.y - self.arg[0,0]*self.x**3 - self.arg[1,0]*self.x**2 - self.arg[2,0]*self.x**1 - self.arg[3,0]])", "def E(self):\n return self._E", "def E(self):\n return self._E", "def euler_characteristic(self):\n return Integer(self.degree() * 2 -\n sum(sum(j - 1 for j in self.profile(i))\n for i in range(self.length())))", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def get_E(self):\n return self.E", "def getEll(self):\n\n\t\tellx = fftengine.fftfreq(self.data.shape[0])*2.0*np.pi / self.resolution.to(u.rad).value\n\t\telly = fftengine.rfftfreq(self.data.shape[0])*2.0*np.pi / self.resolution.to(u.rad).value\n\t\treturn np.sqrt(ellx[:,None]**2 + elly[None,:]**2)", "def getEta(self):\n self.__eta = 3./8.*(1. - self.__alpha0 - self.__alpha1 - 2.*self.__beta)\n if self.__eta<0.: self.__eta=0. # erreur d'arrondi\n return self.__eta", "def euler_from_quaternion(quaternion, axes='sxyz'):\r\n return euler_from_matrix(quaternion_matrix(quaternion), axes)", "def forwardEuler(self,un, tn):\n return un + self.dt*self.f(un, tn)", "def euler(faces, edges, verticies):\n\n # Return the calculated value\n return verticies + edges - faces", "def e(self):\n if self._e is None:\n # self._e = self.distributions.uniform(0.3,0.33)\n # return self._e\n # max is set by q but also limited by users choice of e_max.\n res_a = 29.9*((self.j[0]/self.k[0])**(2/3))\n q = self.distributions.truncated_normal(self.q_c, self.q_w, res_a*(1-0.8), res_a*(1-0.001))\n self._e = 1 - q/res_a\n return self._e", "def get_E(self):\r\n return self.E", "def get_ell(self):\n lx, ly = self.get_lxly()\n return np.sqrt(lx**2 + ly**2)", "def get_ell(self):\n lx, ly = self.get_lxly()\n return np.sqrt(lx**2 + ly**2)", "def get_ell(self):\n lx, ly = self.get_lxly()\n return np.sqrt(lx**2 + ly**2)", "def get_e(self):\n return self.e_min + self.e_ * self.e_range", "def etol(self) -> PotentialEnergy:\n return self._etol", "def inertia_tensor(self, masswt=True, zero=ZERO):\n return self.inertia_tensor_partial(range(self.natom()), masswt, zero)", "def get_eigenvalues(self):\n return self.eigenValues", "def omega(self):\n return self._data.train_X @ self._thetas", "def get_torque(self):\n return self.node.sdo[0x6077].phys # rate torque(mN.m) /1000", "def E(self, temperature):\n E = None\n if self._E_table:\n E = self._E_table.Value(temperature)\n return E", "def get_efg_tensor(self, atom_index: int) -> ArrayLike:\n return self._efg_tensors[atom_index - 1]", "def E(self):\n return self._properties['E']", "def get_eigen(self, predictor=True):\n\n if predictor is True:\n vec_ = range(1, self.npred + 1)\n fctr_ = self.gamma\n else:\n vec_ = range(1, self.nresp + 1)\n fctr_ = self.eta\n eigen = np.exp([-fctr_ * float(p_) for p_ in vec_]) / np.exp(-fctr_)\n return eigen", "def eulerphi(n):\n\treturn euler_phi(n)", "def eulerphi(n):\r\n\treturn euler_phi(n)", "def get_EKU(self):\n\n return self.get_POW().getEKU()", "def get_euler_angles_from_T(T):\n pass", "def get_E(self):\r\n return self.Real.E, self.Ideal.E", "def get_E(self):\r\n return self.Real.E, self.Ideal.E", "def nE(self):\n return int(self.vnE.sum())", "def get_data_term(self):\n \n if self.num_hidden == 0:\n \n data_term = -self.compute_energy(self.x, self.batch_size)\n \n 
else:\n \n data_term = -self.compute_free_energy(self.x)\n \n return T.sum(T.exp(-data_term))", "def degree_u(self):\n return self._degree_u", "def E(self):\n return self.generic_getter(get_energy, \"E\", \"convert_energy\")", "def ets(self):\n r = (self.table[0, 0] + self.table[0, 1]) * (self.table[0, 0] + self.table[1, 0]) / self.N\n return (self.table[0, 0] - r) / (self.table[0, 0] + self.table[0, 1] + self.table[1, 0] - r)", "def value(self):\n return self._adj_per_deg * self.temp * self.n_atoms", "def phi(self):\n if self._phi is None:\n self._phi = self.phi0 + self.distributions.uniform(-1, 1) * self.resamp\n return self._phi", "def temperature() -> FlowFieldVal:\n return [\n self._t_s - self._delta_t * tf.math.tanh(z / self._height) for z in zz\n ]", "def get_u(self, e):\n e_p = e - self.old_e\n\n self.old_e = self.e\n self.e = e\n\n self.sum_e += e\n\n # PID controller.\n u = - self.k_p * e - self.k_d * e_p - self.k_i * self.sum_e\n\n return u", "def phi(self):\n return self._phi", "def getEnergy(self):\n energy = 0.0\n\n for i in range(0, self.nPoints):\n energy += self.tDomain[i] ** 2\n\n energy /= self.nPoints\n return energy", "def EulerN(particle,dt):\n particle.acc = particle.acc\n particle.vel = particle.vel + particle.acc*dt\n particle.pos = particle.pos + particle.vel*dt\n\n return particle", "def get_ee_vel(self):\n raise NotImplementedError", "def getLattice(self):\n return self.unitcell", "def E(self, t):\n\n\t\tE = self.E0\n\n\t\t# Gaussian pulse shape\n\t\tE *= np.exp(-2.*np.log(2.)*((t-self.t0)/self.pulse_duration)**2.)\n\n\t\t# Instantaneous phase\n\t\tif self.phase:\n\t\t\tE *= np.cos(self.omega*(t-self.t0))\n\n\t\t# Transmition\n\t\tif self.remove_reflected_part and self.domain.D == 0:\n\t\t\tmaterial = self.domain.materials[0]\n\t\t\tE *= ((1.-material.Reflectivity)/material._Drude_index.real)**0.5\n\n\t\treturn E", "def GetEigenvalues(self):\n\t\treturn self.Solver.GetEigenvalues().real.copy()", "def eta():\n\n\t\teta = np.array([np.zeros(self.__Nparticles),np.random.normal(0,np.sqrt(self.__dt),self.__Nparticles)])\n\t\teta = eta.transpose()\n\t\treturn eta", "def expression_term(self):\n return self._expression_term", "def phi(self):\n if self._phi is None:\n self._phi = self.phi0 + 2*self.resamp*self.distributions.uniform(-0.5, 0.5)\n return self._phi", "def E(self, z):\n return np.sqrt(self.Omega_m * np.power(1 + z, 3.0) + self.Omega_L)", "def GetEulerSequenceList(self):\n return _gmat_py.Attitude_GetEulerSequenceList(self)", "def euler_from_quaternion(self, x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians", "def omega(self):\n return self._omega", "def U(self):\n return self._U", "def Euler2Mat(e):\n x=e[0]\n y=e[1]\n z=e[2]\n s1=np.sin(x)\n s2=np.sin(y)\n s3=np.sin(z)\n c1=np.cos(x)\n c2=np.cos(y)\n c3=np.cos(z)\n m=np.array([[c1*c2*c3-s1*s3,-c3*s1-c1*c2*s3,c1*s2],\n [c1*s3+c2*c3*s1,c1*c3-c2*s1*s3,s1*s2],\n [-c3*s2,s2*s3,c2]])\n return m", "def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n 
return roll_x, pitch_y, yaw_z # in radians", "def determinant(self):\n if self.L is None or self.U is None:\n self.decomposeLU()\n\n retval = 1.0\n for i in range(self.rows):\n retval *= self.L[i, i] * self.U[i, i]\n return retval", "def determinant(self):\n return np.linalg.det(self._data)", "def value(self):\n return prod([p**e for p,e in self.__x], self.__unit)", "def phi(self):\n return (np.sum(self.diameters**self.ndim)*np.pi / (2*self.ndim))", "def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy", "def tensor_density(self):\r\n from .converter import Converter\r\n return Converter.convert_density(self)", "def E_polynomial(self):\n\n from nodepy import stability_function\n p, q = self.stability_function()\n return stability_function.E_polynomial(p, q)", "def getEta( self, i : int ):\n return enumerate(self._Vals[i][ \\\n self._layout.starts[self._inv_dims_order[i]] : \\\n self._layout.ends[self._inv_dims_order[i]]])", "def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians", "def getReal(self):\n return _libsbml.ASTNode_getReal(self)", "def temperature(self):\n value = float(self._parent.query('R{}'.format(self._idx))[1:])\n return pq.Quantity(value, pq.Kelvin)", "def tensor_pure(self):\r\n from .converter import Converter\r\n return Converter.convert_pure(self)", "def determinant(self):\n if not self.isSquare():\n raise ValueError(\"Determinant is not defined for non-square matrix\")\n if (self._height == 1 and self._width == 1):\n return self._value[0][0]\n returnvalue = 0\n for i in range(self._width):\n returnvalue += self._value[0][i] * self.cofactor(0, i)\n return returnvalue", "def get_eangles(self):\n return self.eangles", "def euler_step(u, t, f, dt):\n \n return u + dt * f(u,t)", "def torch(self):\n tensor = self.data * 2**self.scale\n \n # Check for and warn about errors in conversion\n if bad_conversion(self, tensor):\n warnings.warn(\"Underflow and/or overflow detected \"\n \"during torch() call\", RuntimeWarning)\n\n return tensor", "def determinant(self):\n if self.n_rows != self.n_cols:\n raise Exception('Matrix is not square')\n if self.n_rows == 2:\n return (self.data[0][0] * self.data[1][1]) - (self.data[1][0] * self.data[0][1])\n else:\n echelon, ops = reduce_to_echelon(self.data.copy(), True)\n swaps = sum([1 if row[0] == 'swap' else 0 for row in ops])\n return math.prod([echelon[i][i] for i in range(len(echelon))]) * (-1) ** swaps", "def tensor_voigt(self):\n return np.array([self.m_tt, self.m_pp, self.m_rr, self.m_rp, self.m_rt,\n self.m_tp])", "def get_linearEvolving(self):\n return self.get_linearEvolvingEigen()", "def _etaE(self,x):\n return self._etaE_cool(x) + self._etaE_hot(x)", "def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return roll_x, pitch_y, yaw_z # in radians", "def 
get_temperature_delta(self):\n return self._mcp9600.get('DELTA').value", "def euler_from_quaternion(self, quaternion):\n x = quaternion.x\n y = quaternion.y\n z = quaternion.z\n w = quaternion.w\n\n sinr_cosp = 2 * (w * x + y * z)\n cosr_cosp = 1 - 2 * (x * x + y * y)\n roll = np.arctan2(sinr_cosp, cosr_cosp)\n\n sinp = 2 * (w * y - z * x)\n pitch = np.arcsin(sinp)\n\n siny_cosp = 2 * (w * z + x * y)\n cosy_cosp = 1 - 2 * (y * y + z * z)\n yaw = np.arctan2(siny_cosp, cosy_cosp)\n\n return roll, pitch, yaw", "def safe_read_euler(self):\n\n ifMutexAcquire(self.use_mutex)\n try:\n x, y, z = self.read_euler()\n except Exception as e:\n # print(\"safe read euler: {}\".format(str(e)))\n # x, y, z = 0, 0, 0\n raise\n finally:\n ifMutexRelease(self.use_mutex)\n return x,y,z", "def determinant(self):\n d1 = self._row_1[0] * (self._row_2[1] * self._row_3[2] - self._row_2[2] * self._row_3[1])\n d2 = self._row_1[1] * (self._row_2[0] * self._row_3[2] - self._row_2[2] * self._row_3[0])\n d3 = self._row_1[2] * (self._row_2[0] * self._row_3[1] - self._row_2[1] * self._row_3[0])\n return d1 - d2 + d3", "def euler_from_quaternion(x, y, z, w):\r\n\tt0 = +2.0 * (w * x + y * z)\r\n\tt1 = +1.0 - 2.0 * (x * x + y * y)\r\n\troll_x = math.atan2(t0, t1)\r\n\r\n\tt2 = +2.0 * (w * y - z * x)\r\n\tt2 = +1.0 if t2 > +1.0 else t2\r\n\tt2 = -1.0 if t2 < -1.0 else t2\r\n\tpitch_y = math.asin(t2)\r\n\r\n\tt3 = +2.0 * (w * z + x * y)\r\n\tt4 = +1.0 - 2.0 * (y * y + z * z)\r\n\tyaw_z = math.atan2(t3, t4)\r\n\r\n\treturn roll_x, pitch_y, yaw_z # in radians\r", "def T(self):\n return self.__T", "def phi(self):\n return np.arctan2(np.sqrt(self.x**2 + self.y**2), self.z)", "def eom(self, check=False):\n visitor = self.root.visitor\n if check:\n for tensor in visitor():\n tensor.check_completness(strict=True)\n # Clean\n for t in visitor():\n t.aux = None\n # Term by term...\n for n in self.term_visitor():\n for tensor in visitor(leaf=False):\n tmp = self._single_eom(tensor, n)\n prev = tensor.aux\n tensor.aux = tmp if prev is None else prev + tmp\n # Times coefficient\n for tensor in visitor(leaf=False):\n tensor.aux /= self.coefficient()\n if tensor.axis is None and self.init_energy is not None:\n tensor.aux -= self.init_energy * tensor.array\n return", "def ensemble_determinant(self):\n return np.linalg.det(self.ensemble_transition_matrix)", "def t(l3,Ei,Et,Et_axis):\n Ef=Ei-Et\n T=(-(l3/vFrmE(Ef))+(l3/np.sqrt(vFrmE(Ei)**2-vsq_from_E(Et_axis))))*1e6\n return (T)", "def get_force(self):\n displ = self.get_displ()\n equil = displ / np.linalg.norm(displ) * self.L0\n return self.k * (displ - equil)", "def ema(self) -> float:\n return self._ema", "def getRPY(self):\n\n currentmat = self.objcm.getMat()\n currentmatnp = base.pg.mat4ToNp(currentmat)\n rpy = rm.euler_from_matrix(currentmatnp[:3, :3], axes=\"sxyz\")\n return np.array([rpy[0], rpy[1], rpy[2]])", "def calculate(self):\n\n return self.confusion_matrix.tn", "def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def determinant (self):\n if self.is_square:\n det = 1\n for idx, row in enumerate(echelon_form(self).rows()):\n det *= row[idx]\n return det\n else:\n raise NotImplementedError(\n \"Determinant only defined for square matrices.\")", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)" ]
[ "0.65126354", "0.6043174", "0.59547913", "0.590338", "0.5897418", "0.58401287", "0.58401287", "0.5825734", "0.5797117", "0.5762484", "0.572737", "0.5713381", "0.5662122", "0.5651244", "0.5643783", "0.56348675", "0.56310534", "0.56149095", "0.56149095", "0.56149095", "0.5590667", "0.5574735", "0.55439043", "0.55356556", "0.55289936", "0.55267936", "0.5495457", "0.5494943", "0.54921675", "0.5445496", "0.54447186", "0.5421488", "0.54171157", "0.5407869", "0.5406261", "0.5406261", "0.53956354", "0.53862184", "0.5384387", "0.5378248", "0.53775555", "0.537406", "0.53535175", "0.53527176", "0.5351189", "0.5346375", "0.5320938", "0.53173196", "0.5301276", "0.5300922", "0.52940327", "0.5290587", "0.52727175", "0.5269965", "0.52655363", "0.5263423", "0.5263236", "0.52565336", "0.52536225", "0.5248382", "0.52383107", "0.52365357", "0.5220023", "0.51919395", "0.51880383", "0.5178635", "0.5172405", "0.5163393", "0.5161078", "0.5157443", "0.51545906", "0.51545876", "0.515329", "0.5151875", "0.5147451", "0.5147085", "0.51462215", "0.51457036", "0.5143106", "0.5139376", "0.513759", "0.5133379", "0.51298016", "0.5125239", "0.5124676", "0.51163155", "0.51144403", "0.5112835", "0.5108894", "0.5104584", "0.5096639", "0.50888693", "0.5083406", "0.50824326", "0.50810724", "0.508084", "0.5075631", "0.5075236", "0.5063429", "0.5059621" ]
0.64894354
1
Rotates this atom by the given rotation matrix.
def rotate(self, rotation):
    self.coords = dot(rotation, self.coords)
    return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "def rotate(self, matrix):\n newCoord = np.zeros(self.coord.shape)\n newCoord[:,0]=matrix[0,0]+matrix[0,1]*self.coord[:,0]+matrix[0,2]*self.coord[:,1]+matrix[0,3]*self.coord[:,2]\n newCoord[:,1]=matrix[1,0]+matrix[1,1]*self.coord[:,0]+matrix[1,2]*self.coord[:,1]+matrix[1,3]*self.coord[:,2]\n newCoord[:,2]=matrix[2,0]+matrix[2,1]*self.coord[:,0]+matrix[2,2]*self.coord[:,1]+matrix[2,3]*self.coord[:,2]\n self.coord = deepcopy(newCoord)", "def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))", "def rotate(self, matrix):\n n = len(matrix)\n \n for circle in range(n//2):\n r_circle = n - circle - 1\n for i in range(circle, n - circle - 1):\n a = matrix[circle][i]\n b, matrix[i][r_circle] = matrix[i][r_circle], a\n c, matrix[r_circle][n - i - 1] = matrix[r_circle][n - i - 1], b\n d, matrix[n - i - 1][circle] = matrix[n - i - 1][circle], c\n matrix[circle][i] = d", "def rotate(self, rotation_matrix, centre=None):\n locations = self.locations.rotate(rotation_matrix, centre)\n if self.orientations is not None:\n orientations = self.orientations.rotate(rotation_matrix, None)\n else:\n orientations = None\n pcs = self.pcs.rotate(rotation_matrix, centre)\n self.locations = locations\n self.orientations = orientations\n self.pcs = pcs\n return self", "def rotate(self, x=0, y=0, z=0):\n\t\tquaternion = R.from_euler('xyz', [x, y, z], degrees=True)\n\t\trotation_matrix = np.array(quaternion.as_matrix())\n\t\trotation_matrix = np.pad(rotation_matrix, [(0, 1), (0, 1)], mode='constant')\n\t\trotation_matrix[3,3] = 1\n\n\t\tself.matrix = np.matmul(self.matrix, rotation_matrix)", "def rotate(self, matrix) -> None:\n c = len(matrix)\n matrix[:] = [[matrix[c-i-1][j] for i in range(c)] for j in range(c)]", "def rotation_matrix(self,rot_mat,center=True,**kwargs):\n xyz = self.get('x,y,z',**kwargs)\n\n if center:\n xyz0 = np.mean(xyz)\n xyz = np.dot(rot_mat,(xyz-xyz0).T).T + xyz0\n else:\n xyz = np.dot(rot_mat,(xyz).T).T\n self.update('x,y,z',xyz,**kwargs)", "def rotate(self, matrix: list[list[int]]) -> None:", "def rotate(self, matrix: List[List[int]]) -> None:\n flip(transpose(matrix))", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def rotate(self, rotation):\n\t\tif not isinstance(rotation,Rotation):\n\t\t\trotation = Rotation(*rotation)\n\t\treturn rotation.matrix() * self", "def rotate(self, quaternion):\n rot3d = quaternion2rot3d(quaternion)\n new_pos = np.dot(self.atom_pos, rot3d.T)\n self.set_atom_pos(new_pos)", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def rotate_matrix(self, mat):\r\n N=3\r\n for x in range(0, int(N / 2)):\r\n for y in range(x, N-x-1):\r\n temp = mat[x][y]\r\n mat[x][y] = mat[y][N-1-x]\r\n mat[y][N-1-x] = mat[N-1-x][N-1-y]\r\n mat[N-1-x][N-1-y] = mat[N-1-y][x]\r\n mat[N-1-y][x] = temp\r\n return mat", "def rotate(self, matrix: List[List[int]]) -> None:\n # 矩阵转90° 等于先 转置矩阵,然后再 翻转一行\n # n = len((matrix[0])) # 获取矩阵一行的长度\n #\n # # transpose matrix\n # for i in range(n):\n # for j in range(i, n):\n # matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n #\n # # reverse each row\n # for i in range(n):\n # matrix[i].reverse()\n m = zip(*matrix) # 切片 例如: 切下第一列 1,4,7 , *\n for i, _m in enumerate(m):\n matrix[i] = list(_m)[::-1] # list函数变成list; [::-1] 翻转一下 变成7,4,1", "def mrotate(self):\n 
result_matrix = [[0 for col in range(len(self.matrix[0]))] for row in range(len(self.matrix))]\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[0])):\n result_matrix[i][j] = self.matrix[i][len(self.matrix[0]) - 1 - j]\n # left turn -> result_matrix[i][j] = self.matrix[len(self.matrix) - 1 - i][j]\n self.matrix = result_matrix\n pass", "def _rotate(self, affine):\n dims = affine.shape[0]\n if not np.isscalar(self.rotation):\n raise Exception('this class requires exactly one entry for rotation!')\n theta = (self.deformrandomstate.rand() - 0.5) * 2 * self.rotation\n if dims == 4:\n\n # sample unit vector:\n u = np.random.random(3)\n u /= np.sqrt(np.sum([uu ** 2 for uu in u]) + 1e-8)\n ct = np.cos(theta)\n st = np.sin(theta)\n rot = np.eye(4)\n rot[:3, :3] = [\n [ct + u[0] ** 2 * (1 - ct), u[0] * u[1] * (1 - ct) - u[2] * st, u[0] * u[2] * (1 - ct) + u[2] * st],\n [u[1] * u[0] * (1 - ct) + u[2] * st, ct + u[1] ** 2 * (1 - ct), u[1] * u[2] * (1 - ct) - u[0] * st],\n [u[2] * u[0] * (1 - ct) - u[1] * st, u[2] * u[1] * (1 - ct) + u[0] * st, ct + u[2] ** 2 * (1 - ct)]]\n\n elif dims == 3:\n rot = np.eye(3)\n rot[:2, :2] = np.asarray([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])\n else:\n raise Exception(\n 'implement this for each dimension, since not yet implemented for dimension {}'.format(dims))\n\n return np.matmul(rot, affine)", "def rotate(self, matrix):\r\n matrix[:] = [list(row)[::-1] for row in zip(*matrix)]\r\n print(matrix)", "def rotate(self, matrix):\n # matrix[:] = zip(*matrix[::-1])\n n = len(matrix)\n # 水平翻转\n for i in range(n // 2):\n for j in range(n):\n matrix[i][j], matrix[n - i - 1][j] = matrix[n - i - 1][j], matrix[i][j]\n # 主对角线翻转\n for i in range(n):\n for j in range(i):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]", "def rotate(self, angle):\n\t\tif not isinstance(angle, Angle):\n\t\t\tangle = Angle(angle)\n\t\treturn angle.matrix() * self", "def Rotate(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Rotate(*args, **kwargs)", "def rotate(self, matrix: List[List[int]]) -> None:\n for i in range(len(matrix)):\n matrix[i] = matrix[i][::-1]\n for i in range(len(matrix)):\n for j in range(i, len(matrix[0])):\n temp = matrix[i][len(matrix[0])-1-j]\n matrix[i][len(matrix[0])-1-j] = matrix[j][len(matrix[0])-1-i]\n matrix[j][len(matrix[0])-1-i] = temp", "def rotate(self, angle, axis):\r\n R=self.rotation(angle, axis)\r\n self.mlist = (self*R).mlist\r\n return self", "def rotation_matrix(rotate):\n tx, ty, tz = rotate\n Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])\n Ry = np.array([[np.cos(ty), 0, -np.sin(ty)], [0, 1, 0], [np.sin(ty), 0, np.cos(ty)]])\n Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])\n return np.dot(Rx, np.dot(Ry, Rz))", "def set_rotation_matrices(self):\r\n for i in range(len(self.vertices)):\r\n self.vertices[i].meta['C'] = rotation_matrix(self.vertices[i].meta['axis'][0], \r\n self.vertices[i].meta['axis'][1], \r\n self.vertices[i].meta['axis'][2], \r\n self.vertices[i].meta['axis_order'],\r\n degrees=True)\r\n # Todo: invert this by applying angle operations in reverse order\r\n self.vertices[i].meta['Cinv'] = np.linalg.inv(self.vertices[i].meta['C'])", "def rotate(matrix: List[List[int]]) -> None:\n if matrix is None:\n return\n\n # transpose\n for i in range(0, len(matrix)):\n for j in range(i, len(matrix[0])):\n temp = matrix[i][j]\n matrix[i][j] = matrix[j][i]\n matrix[j][i] = temp\n # reflect\n for i in range(0, len(matrix)):\n 
for j in range(0, len(matrix[0]) // 2):\n reflection = len(matrix[0]) - j - 1\n temp = matrix[i][j]\n matrix[i][j] = matrix[i][reflection]\n matrix[i][reflection] = temp", "def rotate(self, matrix: List[List[int]]) -> None:\n for r in range(len(matrix)):\n for c in range(r):\n matrix[r][c], matrix[c][r] = matrix[c][r], matrix[r][c]\n for row in matrix:\n row.reverse()", "def rotate(self, quaternion, origin_x = 0, origin_y = 0, origin_z = 0):\n\n for atom in self.get_atoms():\n atom.rotate(quaternion, origin_x, origin_y, origin_z)", "def rotate(self, matrix: List[List[int]]) -> None:\r\n # Vertical Mirror\r\n for i in range(len(matrix)):\r\n for j in range(floor(len(matrix)/2)):\r\n t = matrix[i][j]\r\n matrix[i][j] = matrix[i][len(matrix)-1-j]\r\n matrix[i][len(matrix)-1-j] = t\r\n \r\n # Top right to bottom left diagonal mirror\r\n for i in range(len(matrix)):\r\n for j in range(len(matrix)):\r\n if i < len(matrix)-1-j:\r\n t = matrix[i][j]\r\n matrix[i][j] = matrix[len(matrix)-j-1][len(matrix)-1-i]\r\n matrix[len(matrix)-j-1][len(matrix)-1-i] = t", "def _rotationMatrix(self, n_dim, theta):\n i = np.identity(n_dim)\n c, s = np.cos(theta)*i, np.sin(theta)*i\n rotation = np.bmat([[c, s], [-s, c]])\n return rotation", "def rotate(self, matrix: List[List[int]]) -> None:\n length = len(matrix)\n for row in range(length//2):\n for col in range(row, length-row-1):\n # matrix[row][col], matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row]\n matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row], matrix[row][col] = matrix[row][col], matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row]\n return", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for col in range(n):\n tmp = []\n for row in range(n):\n tmp.append(matrix[n-1-row][col])\n matrix.append(tmp)\n del(matrix[:n])", "def matrix(self):\n return self._rotation", "def transform(self, mat: TxMatrix) -> None:\n self.rotation = self.rotation - int(0x10000 * mat.angle / math.pi / 2) & 0xFFFF\n if mat.flipped:\n self.flip_y = not self.flip_y\n self.rotation = -self.rotation & 0xFFFF", "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. 
- txx - tyy\n\n return rot", "def RotationMatrix(theta, x, y, z, point=None):\n\treturn mach.rotation_matrix(theta, [x, y, z])", "def rotate(self, matrix: List[List[int]]) -> None:\n \n n = len(matrix) \n \n def rotateLayer(n,scaler):\n \n for i in range(n-1):\n \n r0 = 0 +scaler\n c0 = 0 +i +scaler\n\n r1 = 0 +i +scaler\n c1 = n-1 +scaler\n\n r2 = n-1 +scaler\n c2 = n-1 -i +scaler\n\n r3 = n-1 -i +scaler\n c3 = 0 +scaler\n \n temp = matrix[r3][c3]\n matrix[r3][c3] = matrix[r2][c2]\n matrix[r2][c2] = matrix[r1][c1]\n matrix[r1][c1] = matrix[r0][c0]\n matrix[r0][c0] = temp\n\n scaler = 0\n \n for i in range(n,1,-2):\n \n rotateLayer(i,scaler)\n scaler += 1", "def rotate(self, x_angle, y_angle, z_angle, center=None):\n self._transform(\n Object.generate_rotation_matrix(x_angle, y_angle, z_angle),\n center)", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for i in range(n // 2):\n for t in range(n - 1 - 2 * i):\n temp = [matrix[i][-i-1-t], matrix[i+t][i], matrix[-i-1][i+t], matrix[-i-1-t][-i-1]]\n matrix[i][-i-1-t] = temp[1]\n matrix[i+t][i] = temp[2]\n matrix[-i-1][i+t] = temp[3]\n matrix[-i-1-t][-i-1] = temp[0]", "def rotate(self, matrix: List[List[int]]) -> None:\n\n x = 0\n sidelen = len(matrix)\n while sidelen-x > 0:\n a = sidelen - x\n for i in range(x, a-1): # swap 1. row vs right colunm\n matrix[x][i], matrix[i][a - 1] = matrix[i][a - 1], matrix[x][i]\n\n for i in range(x, a-1): # swap 1. row vs last row\n matrix[x][i], matrix[a - 1][a - i-1 +x] = matrix[a - 1][a - i-1+x], matrix[x][i]\n\n for i in range(x, a-1): # swap 1. row vs first column\n matrix[x][i], matrix[a - i-1+x][x] = matrix[a - i-1+x][x], matrix[x][i]\n\n x += 1", "def rotate(self, matrix: List[List[int]]) -> None:\n if(matrix == None or len(matrix) == 1): return\n n = len(matrix)\n for i in range(0, n//2 + 1):\n for j in range(i, n-1-i):\n tmp = matrix[i][j]\n matrix[i][j] = matrix[n-1-j][i]\n matrix[n-1-j][i] = matrix[n-1-i][n-1-j]\n matrix[n-1-i][n-1-j] = matrix[j][n-1-i]\n matrix[j][n-1-i] = tmp\n \n return", "def rot(self, t=0., transposed=False):\n rotmat = np.array(\n [[np.cos(self._pa+self._omegab*t),np.sin(self._pa+self._omegab*t)],\n [-np.sin(self._pa+self._omegab*t),np.cos(self._pa+self._omegab*t)]])\n if transposed:\n return rotmat.T\n else:\n return rotmat", "def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for i in range(n - 1):\n for j in range(n - 1 - i):\n matrix[i][j], matrix[n-1-j][n-1-i] = matrix[n-1-j][n-1-i], matrix[i][j]\n for i in range(n):\n for j in range(n // 2):\n matrix[j][i], matrix[n-1-j][i] = matrix[n-1-j][i], matrix[j][i]", "def rotate(self, matrix: List[List[int]]) -> None:\n # 对角线对称\n num_row = len(matrix)\n num_col = len(matrix[0])\n for i in range(num_row):\n for j in range(i, num_col):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n # 列对称\n for i in range(num_row):\n for j in range(num_col // 2):\n matrix[i][j], matrix[i][num_col-1-j] = matrix[i][num_col-1-j], matrix[i][j]\n return matrix", "def rotate(self, angle=0.0):\n # TODO: Implement the rotate function. 
Remember to record the value of\n # rotation degree.\n self.rotDegree = angle\n self.x = rotate(self.x, angle = angle, axes=(0, 1), reshape=False, \n output=None, order=3, mode='constant', cval=0.0, prefilter=True)\n # This rotation isn't working correctly. Get shit for non right anlge rotatations\n # raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix) # 行\n\n # 以x=y为轴翻转\n # [[1,2,3],\n # [4,5,6],\n # [7,8,9]]\n # 变为\n # [1 4 7]\n # [2 5 8]\n # [3 6 9]\n for i in range(n):\n for j in range(i, n):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n # 以中点为轴翻转\n for i in range(n):\n for j in range(n // 2):\n matrix[i][j], matrix[i][n - j - 1] = matrix[i][n - j - 1], \\\n matrix[i][j]\n\n # 非原地修改写法,先上下翻转,再以x=y为轴复制对应数字\n # n = len(matrix)\n # r = list(zip(*matrix[::-1]))\n # for i in range(n):\n # for j in range(n):\n # matrix[i][j] = r[i][j]", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for i in range(n):\n for j in range(n // 2):\n matrix[i][j], matrix[i][n-1-j] = matrix[i][n-1-j], matrix[i][j]\n for i in range(n):\n for j in range(n-i):\n matrix[i][j], matrix[n-1-j][n-1-i] = matrix[n-1-j][n-1-i], matrix[i][j]", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n R = Rotator.rotation_matrix(alpha,0.0,0.0)\n return np.dot(R,X)", "def rotate(self, matrix):\n # # 解法1. 使用zip函数的解压缩,将matrix按列打包\n # for i, conlum in enumerate(zip(*matrix)):\n # matrix[i] = list(conlum)[::-1]\n # return matrix\n\n\n # 解法2. 规规矩矩的inplace替换\n # 首先 对每一个元素横竖坐标互换,相当于沿左上到右下的对角线翻转, 这样每一行再逆序就是最终结果\n for i in range(len(matrix[0])):\n for j in range(i,len(matrix)):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n \n # 下面每一行逆序,python有更简便的方法\n for i in range(len(matrix)):\n matrix[i] = matrix[i][::-1]", "def rotate(self, angle):\n self.call('rotate', angle)", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for l in range(n // 2):\n r = n - 1 - l\n for p in range(l, r):\n q = n - 1 - p\n cache = matrix[l][p]\n matrix[l][p] = matrix[q][l]\n matrix[q][l] = matrix[r][q]\n matrix[r][q] = matrix[p][r]\n matrix[p][r] = cache", "def rotate(self, matrix: List[List[int]]) -> None:\n r = c = len(matrix)\n m = 0\n n = r - 1\n\n while m < n:\n i = m\n for j in range(m, n):\n # print(i, j)\n # print(j, n)\n # print(n, c - j - 1)\n # print(c - j - 1, m)\n temp1 = matrix[j][n]\n matrix[j][n] = matrix[i][j]\n\n temp2 = matrix[n][c - j - 1]\n matrix[n][c - j - 1] = temp1\n\n temp3 = matrix[c - j - 1][m]\n matrix[c - j - 1][m] = temp2\n\n matrix[i][j] = temp3\n # print(matrix)\n m += 1\n n -= 1", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n if n <= 1:\n return\n\n for i in range((n + 1)//2):\n for j in range(i, n - 1 - i):\n tmp = matrix[i][j]\n matrix[i][j] = matrix[n - 1 - j][i]\n matrix[n - 1 - j][i] = matrix[n - 1 - i][n - 1 - j]\n matrix[n - 1 - i][n - 1 - j] = matrix[j][n - 1 - i]\n matrix[j][n - 1 - i] = tmp", "def rotate(self, matrix: 'List[List[int]]') -> 'None':\n lens = len(matrix)\n half = (lens-1)/2.0\n for i in range(int(lens/2)):\n for j in range(i, lens-i-1):\n p = 4\n ti, tj, tval = i, j, matrix[i][j]\n while p > 0:\n i1, j1 = tj, half*2-ti\n ti, tj, tval, matrix[i1][j1] = i1, j1, matrix[i1][j1], tval\n p -= 1\n return matrix", "def rotation(self, camera_rotation: CameraRotationType):\n 
self.set_rotation(camera_rotation)\n self._notify_moved()", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix[0])\n for i in range(n // 2 + n % 2):\n for j in range(n // 2):\n tmp = matrix[n - 1 - j][i]\n matrix[n - 1 - j][i] = matrix[n - 1 - i][n - j - 1]\n matrix[n - 1 - i][n - j - 1] = matrix[j][n - 1 -i]\n matrix[j][n - 1 - i] = matrix[i][j]\n matrix[i][j] = tmp", "def rotate(self, matrix: List[List[int]]) -> None:\r\n n = len(matrix)\r\n for j in range((n+1)//2):\r\n for i in range(n-2*j-1):\r\n matrix[j][j+i], matrix[j+i][n-1-j], matrix[n-1-j][n-1-j-i], matrix[n-1-j-i][j] = matrix[n-1-j-i][j], matrix[j][j+i], matrix[j+i][n-1-j], matrix[n-1-j][n-1-j-i]", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n left_top = [0, 0]\n right_top = [0, n-1]\n left_down = [n-1, 0]\n right_down = [n-1, n-1]\n \n for level in range(n//2):\n for step in range(n-1-2*level):\n matrix[left_top[0]+level][left_top[1]+level+step], \\\n matrix[right_top[0]+level+step][right_top[1]-level], \\\n matrix[right_down[0]-level][right_down[1]-level-step], \\\n matrix[left_down[0]-level-step][left_down[1]+level] = \\\n matrix[left_down[0]-level-step][left_down[1]+level], \\\n matrix[left_top[0]+level][left_top[1]+level+step], \\\n matrix[right_top[0]+level+step][right_top[1]-level], \\\n matrix[right_down[0]-level][right_down[1]-level-step]", "def rotation_matrix(self, rotation, rotation_order=\"zyx\"):\n x = math.radians(rotation[0])\n y = math.radians(rotation[1])\n z = math.radians(rotation[2])\n\n cos = math.cos\n sin = math.sin\n if rotation_order == 'zyx':\n index_0 = cos(y) * cos(z)\n index_1 = cos(z) * sin(x) * sin(y) - cos(x) * sin(z)\n index_2 = cos(x) * cos(z) * sin(y) + sin(x) * sin(z)\n\n index_3 = cos(y) * sin(z)\n index_4 = cos(x) * cos(z) + sin(x) * sin(y) * sin(z)\n index_5 = -cos(z) * sin(x) + cos(x) * sin(y) * sin(z)\n\n index_6 = -sin(y)\n index_7 = -cos(y) * sin(x)\n index_8 = cos(x) * cos(y)\n elif rotation_order == 'xyz':\n index_0 = cos(y) * cos(z)\n index_1 = -cos(z) * sin(z)\n index_2 = sin(y)\n\n index_3 = cos(x) * sin(z) + sin(x) * sin(y) * cos(z)\n index_4 = cos(x) * cos(z) - sin(x) * sin(y) * sin(z)\n index_5 = -sin(x) * cos(y)\n\n index_6 = sin(x) * sin(z) - cos(x) * sin(y) * cos(z)\n index_7 = sin(x) * cos(z) + cos(x) * sin(y) * sin(z)\n index_8 = cos(x) * cos(y)\n\n rot_mat = ((index_0, index_1, index_2),\n (index_3, index_4, index_5),\n (index_6, index_7, index_8))\n\n return rot_mat", "def rotate(self, matrix: list) -> list:\n # l = len(matrix)\n # for i in range(l):\n # for j in range(l):\n # temp = matrix[i][l-1]\n # if j != l:\n # matrix[j][l-1] = matrix[i][j]\n # else:\n # matrix[j][l-1] = temp\n # l -= 1\n \n matrix.reverse() # Reverse\n for i in range(len(matrix)):\n for j in range(i, len(matrix)):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j] # Transpose\n\n return matrix", "def rotate(self, matrix: List[List[int]]) -> None:\n # transpose\n N = len(matrix)\n for i in range(N):\n for j in range(0, i):\n matrix[i][j] = matrix[i][j] ^ matrix[j][i]\n matrix[j][i] = matrix[i][j] ^ matrix[j][i]\n matrix[i][j] = matrix[i][j] ^ matrix[j][i]\n \n # print(matrix)\n # 左右互换\n for i in range(N):\n for j in range(N // 2):\n matrix[i][j] = matrix[i][j] ^ matrix[i][N - j - 1]\n matrix[i][N - j - 1] = matrix[i][j] ^ matrix[i][N - j - 1]\n matrix[i][j] = matrix[i][j] ^ matrix[i][N - j - 1]\n # print(matrix)", "def rotate_matrix(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, s],\n [-s, c]])", "def _rotate(self, arr, 
theta):\n # Rotation Matrix R\n R = [[np.cos(theta), -np.sin(theta)], \n [np.sin(theta), np.cos(theta)]]\n\n return np.matmul(R, arr.T).T", "def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n\n beta = np.arccos(1.0-2*random.rand())\n psi = random.rand() * 2*pi\n\n R = Rotator.rotation_matrix(alpha,beta,psi)\n return np.dot(R,X)", "def apply_rotation(self, eta=0.0, phi=0.0, theta=0.0):\n \n new_rotation_matrix = self.rotation_elements( eta, phi, theta )\n \n #self.rotation_matrix_exp = np.dot( self.rotation_matrix_exp , new_rotation_matrix )\n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def __rotate_model(self):\n self.__model_matrix = self.__get_rotation_matrix(\n self.__face.position_cartesian,\n (1 + self.__face.position[2]) * 0.5)", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n beta = self.beta_sample()\n R = Rotator.rotation_matrix(alpha,beta,0.0)\n X = np.dot(R, X)\n if self.random_flip and (random.rand() > 0.5):\n X[2,:] = -X[2,:]\n X[1,:] = -X[1,:]\n return X", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for i in range(n):\n for j in range(i+1):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n for i in range(n//2):\n for j in range(n):\n matrix[j][i], matrix[j][n-i-1] = matrix[j][n-i-1], matrix[j][i]\n return matrix", "def rotate(self, angle):\n old_angle, tilt = self.rotation\n new_angle = old_angle + angle\n while new_angle > 90:\n new_angle = new_angle - 90\n while angle < -90:\n new_angle = new_angle + 90\n self.rotation = (new_angle, tilt)", "def rotate(self, matrix: list) -> None:\n for i in range(len(matrix)):\n for j in range(i):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n # matrix[i].reverse()\n print(matrix)\n for i in range(len(matrix)):\n matrix[i].reverse()\n print(matrix)", "def rotate(self, matrix: List[List[int]]) -> None:\n # Step 1 flip row in reverse order\n top, down = 0, len(matrix) - 1\n while top < down:\n temp = matrix[top]\n matrix[top] = matrix[down]\n matrix[down] = temp\n top += 1\n down -= 1\n\n # Step 2 flip (i,j) -> (j, i)\n for i in range(len(matrix)):\n for j in range(i+1, len(matrix[i])):\n temp = matrix[i][j]\n matrix[i][j] = matrix[j][i]\n matrix[j][i] = temp", "def apply_rotation_z(self, theta=0.0 ):\n \n theta = radians(theta)\n new_rotation_matrix = [[ +cos(theta) , -sin(theta) , 0 ],\n [ +sin(theta) , +cos(theta) , 0 ],\n [ 0 , 0 , 1 ]] \n \n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def _rotation_matrix(self, axis, angle):\n axis = axis/np.linalg.norm(axis)\n axis_squared = np.square(axis)\n cos_angle = np.cos(angle)\n sin_angle = np.sin(angle)\n rot_matrix_row_one = np.array([cos_angle+axis_squared[0]*(1-cos_angle),\n axis[0]*axis[1]*(1-cos_angle) - axis[2]*sin_angle,\n axis[0]*axis[2]*(1-cos_angle)+axis[1]*sin_angle])\n\n rot_matrix_row_two = np.array([axis[1]*axis[0]*(1-cos_angle)+axis[2]*sin_angle,\n cos_angle+axis_squared[1]*(1-cos_angle),\n axis[1]*axis[2]*(1-cos_angle) - axis[0]*sin_angle])\n\n rot_matrix_row_three = np.array([axis[2]*axis[0]*(1-cos_angle)-axis[1]*sin_angle,\n axis[2]*axis[1]*(1-cos_angle)+axis[0]*sin_angle,\n cos_angle+axis_squared[2]*(1-cos_angle)])\n\n rotation_matrix = 
np.array([rot_matrix_row_one, rot_matrix_row_two, rot_matrix_row_three])\n return rotation_matrix", "def rotatematrix(m, x, y ,z):\r\n for i in xrange(x):\r\n m = rotatem_x(m)\r\n for i in xrange(y):\r\n m = rotatem_y(m)\r\n for i in xrange(z):\r\n m = rotatem_z(m)\r\n return m", "def transformation_matrix(self):\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])", "def rotate(self, angle):\n rotmat = rotation_matrix_2d(angle)\n rotated = np.dot(rotmat.T, [self.pix_x.value, self.pix_y.value])\n self.pix_x = rotated[0] * self.pix_x.unit\n self.pix_y = rotated[1] * self.pix_x.unit\n self.pix_rotation -= angle", "def set_rotation(self, camera_rotation: CameraRotationType):\n assert (\n type(camera_rotation) in (tuple, list)\n and len(camera_rotation) == 2\n and all(type(v) in (int, float) for v in camera_rotation)\n ), \"format for camera_rotation is invalid\"\n self._reset_matrix()\n self._rotation = tuple(camera_rotation)", "def rotate(self, matrix):\n n = len(matrix)\n #转置\n for i in range(n):\n for j in range(i+1,n):\n matrix[i][j],matrix[j][i] = matrix[j][i],matrix[i][j]\n #镜像\n mid = n//2\n for i in range(n):\n for j in range(mid):\n matrix[i][j],matrix[i][n-j-1] = matrix[i][n-j-1],matrix[i][j]", "def rotateZ(self, angle):\r\n if angle:\r\n c = cos(radians(angle))\r\n s = sin(radians(angle))\r\n self.mtrx = dot([[c, s, 0, 0],\r\n [-s, c, 0, 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]],\r\n self.mtrx)\r\n self.rtn[2] = angle\r\n self.was_moved = True", "def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n self.v = Matrix([\n [ca, -sa],\n [sa, ca]\n ]) @ self.v\n return self", "def set_rotation(self, angle):\n self._rotation = angle\n self._reset_slot_bounds()", "def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n rM = Matrix([\n [ca, -sa],\n [sa, ca]\n ])\n p0 = self.p0\n self.c = p0 + rM @ (self.c - p0)\n dp = p0 - self.c\n self.a0 = atan2(dp.y, dp.x)\n return self", "def with_basis_rotated_by(self, transformation_matrix):\n if not _is_real_orthogonal(transformation_matrix):\n raise ValueError(\"Transformation matrix is not real orthogonal.\")\n\n rotated_op = MajoranaOperator()\n for term, coeff in self.terms.items():\n rotated_term = _rotate_basis(term, transformation_matrix)\n rotated_term *= coeff\n rotated_op += rotated_term\n return rotated_op", "def _cubelet_rotation_matrix(self, cubelet_meta_info, qpos_array):\n euler_angles = qpos_array[cubelet_meta_info[\"euler_qpos\"]]\n return rotation.euler2mat(euler_angles)", "def rotate(matrix):\n # 解法1\n # matrix[:] = map(list, zip(*matrix[::-1]))\n length = len(matrix[0])\n\n # 解法2\n # 将数组沿对角线置换\n # for i in range(length):\n # for j in range(i, length):\n # matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n # 翻转每一行\n # for i in range(length):\n # matrix[i].reverse()\n\n # 解法3\n for i in range(length // 2 + length % 2):\n for j in range(length // 2):\n matrix[length-1-j][i], matrix[length-1-i][length-1-j], matrix[j][length-1-i], matrix[i][j] =\\\n matrix[length-1-i][length-1-j], matrix[j][length-1-i], matrix[i][j], matrix[length-1-j][i]", "def rotate(self, matrix: List[List[int]]) -> None:\n height=len(matrix)\n for h in range(math.ceil(height/2)):\n for i in range(h,height-h-1):\n # print((h,i), (height-i-1,h))\n temp=matrix[h][i]\n matrix[h][i] = matrix[height-i-1][h]\n matrix[height-i-1][h] = matrix[height-h-1][height-i-1]\n matrix[height-h-1][height-i-1] = matrix[i][height-h-1]\n matrix[i][height-h-1] = temp", "def rotation_matrix(self, di=(0, 0, 
0), dj=(0, 0, 0)):\n xi, xj = self.get_nodes()\n di, dj = np.asarray(di), np.asarray(dj)\n dx, dy, dz = (xj - xi) + (dj - di)\n return rotation_matrix(dx, dy, dz, self.roll)", "def rotate(self, m):\n n = len(m)\n for i in range(n//2):\n for j in range(i,n-i-1):\n m[j][~i],m[~i][~j],m[~j][i],m[i][j] = \\\n m[i][j],m[j][~i],m[~i][~j],m[~j][i]", "def _rotate(self, angle):\n if self.undobuffer:\n self.undobuffer.push((\"rot\", angle, self._degreesPerAU))\n angle *= self._degreesPerAU\n neworient = self._orient.rotate(angle)\n tracing = self.screen._tracing\n if tracing == 1 and self._speed > 0:\n anglevel = 3.0 * self._speed\n steps = 1 + int(abs(angle)/anglevel)\n delta = 1.0*angle/steps\n for _ in range(steps):\n self._orient = self._orient.rotate(delta)\n self._update()\n self._orient = neworient\n self._update()", "def apply_rotation_x(self, eta=0.0 ):\n \n eta = radians(eta)\n new_rotation_matrix = [[ 1 , 0 , 0 ],\n [ 0 , +cos(eta) , -sin(eta) ],\n [ 0 , +sin(eta) , +cos(eta) ]] \n \n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def rotate(self, radians):\n self._impl.rotate(radians)", "def rotate(self, matrix: List[List[int]]) -> None:\n def _rotate(little_matrix, left, right):\n n = right - left\n for i in range(n):\n little_matrix[left][left+i], little_matrix[left+i][right], little_matrix[right][right-i], little_matrix[right-i][left] = \\\n little_matrix[right-i][left], little_matrix[left][left+i], little_matrix[left+i][right], little_matrix[right][right-i]\n left, right = 0, len(matrix) - 1\n while left < right:\n _rotate(matrix, left, right)\n left += 1\n right -= 1", "def rotation_angle(self, rotation_angle):\n\n self.container['rotation_angle'] = rotation_angle" ]
[ "0.73639774", "0.7307685", "0.7284679", "0.7252222", "0.7239165", "0.71252877", "0.7052873", "0.69820964", "0.69736505", "0.6949628", "0.69119763", "0.6898012", "0.6892073", "0.6847489", "0.6815046", "0.6808609", "0.6807251", "0.68026036", "0.6777418", "0.677454", "0.6742357", "0.67322063", "0.66854", "0.6683385", "0.6678852", "0.6675596", "0.6674149", "0.66656786", "0.66551214", "0.6628479", "0.6620907", "0.66164416", "0.661021", "0.661021", "0.6607251", "0.66010636", "0.6600382", "0.6598514", "0.65423745", "0.6530391", "0.6518132", "0.65118504", "0.65027064", "0.6485216", "0.6484478", "0.6476611", "0.64677835", "0.64620924", "0.64587295", "0.6458604", "0.64548385", "0.64431787", "0.6442375", "0.64227915", "0.64192563", "0.64130026", "0.6412837", "0.64050144", "0.63916016", "0.63905245", "0.6384642", "0.6379425", "0.63758916", "0.6368448", "0.63677216", "0.6358502", "0.6356049", "0.6348472", "0.6337038", "0.63345623", "0.63290143", "0.63158005", "0.63045186", "0.63027686", "0.6294387", "0.6286533", "0.62852305", "0.62666863", "0.62520003", "0.6243878", "0.6234401", "0.6219825", "0.62181336", "0.6217863", "0.62010425", "0.61987936", "0.61987615", "0.61875284", "0.6185061", "0.6179668", "0.61744016", "0.6169398", "0.61692613", "0.6161445", "0.61601436", "0.61450887", "0.6141226", "0.6133956", "0.6121816", "0.61103636" ]
0.67754483
19
Translates this atom by the given translation vector.
def translate(self, translation):
    self.coords = self.coords - translation
    return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def translate(self, vector):\n locations = self.locations.translate(vector)\n # do not translate the orientations!\n pcs = self.pcs.translate(vector)\n self.locations = locations\n self.pcs = pcs\n return self", "def translate(self, vector):\n \n matrix = wf.translationMatrix(*vector)\n for wireframe in self.wireframes.values():\n wireframe.transform(matrix)", "def translate(self, vect):\n self.apply(lambda c: (vector(c) + vect).coords())", "def translate(self, vec):\n\n\t\tfor chain in self.chain:\n\t\t\tchain.translate(vec)", "def __add__(self, vector):\n return self.translated(vector)", "def translateAll(self, vector):\n matrix = self.translationMatrix(vector[0], vector[1], vector[2])\n for wireframe in self.wireframes.values():\n wireframe.transform(matrix)", "def translate(self, trans_vector: Union[Tensor, np.ndarray]) -> None:\n if not isinstance(trans_vector, Tensor):\n trans_vector = self.tensor.new_tensor(trans_vector)\n trans_vector = trans_vector.squeeze(0)\n if trans_vector.dim() == 1:\n assert trans_vector.shape[0] == 3\n elif trans_vector.dim() == 2:\n assert trans_vector.shape[0] == self.tensor.shape[0] and \\\n trans_vector.shape[1] == 3\n else:\n raise NotImplementedError(\n f'Unsupported translation vector of shape {trans_vector.shape}'\n )\n self.tensor[:, :3] += trans_vector", "def translate(self, vec):\n self.substrates = shapely.affinity.translate(self.substrates, vec[0], vec[1])\n self.partitionLine = shapely.affinity.translate(self.partitionLine, vec[0], vec[1])\n for annotation in self.annotations:\n o = annotation.origin\n annotation.origin = (o[0] + vec[0], o[1] + vec[1])\n\n def newRevertTransformation(point, orig=self.revertTransformation, vec=vec):\n prevPoint = (point[0] - vec[0], point[1] - vec[1])\n if orig is not None:\n return orig(prevPoint)\n return prevPoint\n self.revertTransformation = newRevertTransformation", "def translation(self,vect,**kwargs):\n xyz = self.get('x,y,z',**kwargs)\n xyz += vect\n self.update('x,y,z',xyz,**kwargs)", "def translate(self, vec=()):\n for elem in self._elements:\n elem.translate(vec)", "def fromTranslation(cls, vector):\n return super().fromTranslation(3, 3, vector)", "def translate(self, x, y, z):\n\n for atom in self.get_atoms():\n atom.translate(x, y, z)", "def fromTranslation(cls, vector):\n return super().fromTranslation(4, 4, vector)", "def translate(self, vec, newcr):\n vec = vec.view(Vec)\n vec_t = vec + newcr.o + self.o\n return vec_t", "def translate(self,translation_vector):\n if not isinstance(translation_vector,Cartesian3DVector):\n raise CoordinateException(\"Translating a particle with the incorrect translation vector type.\")\n new_particle = self.__class__(self.mass,self.x-translation_vector,self.p)\n return new_particle", "def translate(self, x=0, y=0, z=0):\n\t\ttranslation = np.identity(4)\n\t\ttranslation[0, 3] += x\n\t\ttranslation[1, 3] += y\n\t\ttranslation[2, 3] += z\n\t\t\n\t\tself.matrix = np.matmul(self.matrix, translation)", "def translate(self, v):\n return Position.fromnp(translate(self.tonp(), v))", "def translate(self,translation_vector):\n if isinstance(translation_vector,Cartesian3DVector):\n new_particle = self.__class__(self.mass,self.time,x=self.x-translation_vector,p=self.p)\n return new_particle\n raise CoordinateException(\"Translating a particle with the incorrect translation vector type.\")", "def vector_trans(self, v, T, V0):\n v = np.array(v)\n newv = np.add(v[0:2].dot(T), V0)\n self.log.debug(\"Transformation of vector {}, with transformation matrix {} nad V0 {}, to: 
{}\".format(v, T, V0, newv))\n return newv", "def translate(self, vector):\n if self.blender_object:\n self.blender_object.location = vector", "def translation(self, x, y, z) -> None:\n ...", "def TransformVector(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_TransformVector(self, *args)", "def TransformVector(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD3_TransformVector(self, *args)", "def fromTranslation(cls, rows, cols, vector):\n data = np.eye(rows, cols)\n data[0:rows - 1, cols - 1] = vector[0:rows - 1]\n return cls.create(rows, cols, data)", "def translate(self, axis: Vector, dist: float):\n self.origin = self.origin + axis * dist", "def translate(self, x, y, z) -> None:\n ...", "def translate(self, tr):\n self.points = self.points + tr", "def translate(self, tr):\n c = self.c -self.a*tr[0] -self.b*tr[1]\n self.c =c\n self.pointN = self.pointN+tr\n self.point1 = self.point1+tr\n self.points +=tr", "def __mul__(self, other: float) -> 'Translation':\n self._vector.setWithArray((self._vector.x * other, self._vector.y * other, self._vector.z * other))\n return self", "def Translate(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Translate(*args, **kwargs)", "def translate(self, vect):\n self.pl.Base = vect\n\n self.comp.Placement = self.pl\n self.box.Placement = self.pl", "def translation(self, d):\n newreg = self.copy()\n _translate(newreg, d)\n return newreg", "def __add__(self, other: float) -> 'Translation':\n self._vector.setWithArray((self._vector.x + other, self._vector.y + other, self._vector.z + other))\n return self", "def apply(self,v):\n return np.tensordot(self._transform, v, axes=([1],[0])) \\\n + self._translation", "def translate(self,\n x,\n trans_args,\n char_list=None,\n rnnlm=None,\n ensemble_models=[]):\n raise NotImplementedError(\"translate method is not implemented\")", "def translate(self, tx, ty):\n self._impl.translate(tx, ty)", "def translate(x, y, z):\n global _cmds\n _cmds = f\"translate([{x},{y},{z}])\\n\" + _cmds", "def translation_vector(self):\n return self.affine_matrix[0:3][:, 3]", "def project_vectors(self, vectors):\n return np.dot(vectors, self.components.T)", "def translate_waypoint(self, vector: Sequence[float], n_steps: int):\n for component in range(len(self.coordinates)):\n self.waypoint_vector[component] += vector[component] * n_steps", "def translated(self, vector):\n try:\n l = len(vector)\n except TypeError:\n vector = (vector,)\n l = 1\n\n if not l == self.ndim:\n raise ValueError(\"'vector' must be a sequence containing a number \"\n \"for each dimension in the {} instance\"\n .format(self.__class__.__name__))\n\n new_edges = [e + v for (e, v) in zip(self.edges, vector)]\n return type(self)(new_edges)", "def translate(self):\n\t\tself._translate(True)", "def translate(self, displacement):\n self._center = self._center + np.array(displacement)\n self._position = self._position + np.array(displacement)", "def translation(self, d):\n newpoly = self.copy()\n _translate(newpoly, d)\n return newpoly", "def translate(self, displacement):\n\n self.center = (self.center[0] + displacement[0],\n self.center[1] + displacement[1])", "def translate():\n pass", "def Translate(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_Translate(self, *args)", "def translate(self, displacement):\n self._position = self._position + np.array(displacement)", "def __init__(self,\n x: Optional[Number]=0,\n y: Optional[Number]=0,\n z: Optional[Number]=0):\n self._translation 
= Vector(x, y, z)", "def Translate(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD3_Translate(self, *args)", "def translate(self):\n pass", "def Translate(*args, **kwargs):\n return _gdi_.GraphicsContext_Translate(*args, **kwargs)", "def from_vector_inplace(self, vector):\n self.pdm.from_vector_inplace(vector)\n # By here the pdm has updated our target state, we just need to\n # update the transform\n self.transform.set_target(self.target)", "def project_vector(self, vector: array_like) -> Vector:\n point_in_space = self.point + vector\n point_on_plane = self.project_point(point_in_space)\n\n return Vector.from_points(self.point, point_on_plane)", "def translate(self, tx: float = 0, ty: float = 0, tz: float = 0) -> 'Component':\n translation = Matrix3D.create()\n translation.translation = adsk.core.Vector3D.create(tx, ty, tz)\n self._local_transform.transformBy(translation)\n self._reset_cache()\n return self", "def transbrl (arg):\r\n return n.translate(p.translate(arg))", "def translate(self, x, y=None):\n if isinstance(x, (tuple, Point, list)):\n dx, dy = x[0], x[1]\n elif y is not None:\n dx, dy = x, y\n else:\n dx, dy = x, x\n self.x = self.x + dx\n self.y = self.y + dy", "def translate(self):\n raise NotImplementedError('subclass must override this method')", "def get_translation_matrix(translation_vector):\r\n T = torch.zeros(translation_vector.shape[0], 4, 4).to(device=translation_vector.device)\r\n\r\n t = translation_vector.contiguous().view(-1, 3, 1)\r\n\r\n T[:, 0, 0] = 1\r\n T[:, 1, 1] = 1\r\n T[:, 2, 2] = 1\r\n T[:, 3, 3] = 1\r\n T[:, :3, 3, None] = t\r\n\r\n return T", "def transl(x, y, z):\n displace_vector = [[x],\n [y],\n [z]]\n return np.matrix(displace_vector)", "def __mul__(self, value: Number):\n return Translation(\n value * self.x,\n value * self.y,\n value * self.z)", "def get_translation(self):\n trans_keys = ''.join(self._trans_dict.keys())\n trans_values = ''.join(self._trans_dict.values())\n\n trans_table = string.maketrans(trans_keys, trans_values)\n translation = self._puzzle.translate(trans_table)\n return translation", "def cur_position_translation(self, translation):\n translation = np.array(translation)\n position = np.ravel(np.array(self.get_current_cartesian_position().position))\n return translation + position", "def reverse(self,v):\n return np.tensordot(self._inverseTransform,\n v-self._translation,axes=([1],[0]))", "def project_vectors(self, vectors):\n X = vectors - self._mean\n return np.dot(X, self.components.T)", "def translate(self, displacement):\n for bound in self._bounds:\n bound.translate(displacement)", "def __sub__(self, other: float) -> 'Translation':\n self._vector.setWithArray((self._vector.x - other, self._vector.y - other, self._vector.z - other))\n return self", "def translate ( self, dx, dy, dz):\n self.x = self.x + dx\n self.y = self.y + dy\n self.z = self.z + dz\n self.xyz = np.array((self.x, self.y, self.z))", "def transformation_from_parameters(axisangle, translation, invert=False):\r\n R = rot_from_axisangle(axisangle)\r\n t = translation.clone()\r\n\r\n if invert:\r\n R = R.transpose(1, 2)\r\n t *= -1\r\n\r\n T = get_translation_matrix(t)\r\n\r\n if invert:\r\n M = torch.matmul(R, T)\r\n else:\r\n M = torch.matmul(T, R)\r\n\r\n return M", "def translate(self, diff: AnyVec) -> None:\n for p in self.planes:\n p += diff\n\n u_axis = Vec(self.uaxis.x, self.uaxis.y, self.uaxis.z)\n v_axis = Vec(self.vaxis.x, self.vaxis.y, self.vaxis.z)\n\n # Fix offset - see 2013 SDK utils/vbsp/map.cpp:2237\n 
self.uaxis.offset -= Vec.dot(u_axis, diff) / self.uaxis.scale\n self.vaxis.offset -= Vec.dot(v_axis, diff) / self.vaxis.scale", "def apply_rotation_only(self, vector):\n return np.dot(self.rotation_matrix, vector)", "def project(self, vector):\n return self.project_vectors(vector[None, :]).flatten()", "def translate(self, sequences):\n return [self.vocab.translate(t) for t in sequences]", "def translate(self, dx, dy):\n self.origin = (self.origin[0] + dx, self.origin[1] + dy)\n return self", "def translate(self, dx, dy):\n self.origin = (self.origin[0] + dx, self.origin[1] + dy)\n return self", "def __add__(self, other: TranslationType):\n return Translation(\n self.x + other.x,\n self.y + other.y,\n self.z + other.z)", "def translate(self):\n self.clean_data()\n self.clean_symbols()\n self.translate_line_by_line()", "def _translate(polyreg, d):\n if isinstance(polyreg, Polytope):\n # Translate hyperplanes\n polyreg.b = polyreg.b + np.dot(polyreg.A, d)\n else:\n # Translate subregions\n for poly in polyreg.list_poly:\n _translate(poly, d)\n # Translate bbox and cheby\n if polyreg.bbox is not None:\n polyreg.bbox = (polyreg.bbox[0] + d,\n polyreg.bbox[1] + d)\n if polyreg._chebXc is not None:\n polyreg._chebXc = polyreg._chebXc + d", "def __str__(self):\n return \"\"\"translate([{0!s}, {1!s}, {2!s}]) {{\n {3!s}\n}}\"\"\".format(self.vector[0], self.vector[1], self.vector[2], self.body)", "def translate(self, dx, dy):\n self.position = numpy.array((dx + self.position[0],\n dy + self.position[1]))\n\n return self", "def _translate_x(self, x, pixels):\r\n pixels = (pixels/MAX_LEVEL) * TRANSLATE_CONST\r\n pixels = self._randomly_negate_tensor(pixels)\r\n x = tfa.image.translate_ops.translate(x, [-pixels, 0])\r\n return x", "def transform(self, *args, **kwargs):\n r = Vector2(np.dot(args[0], self))\n if kwargs.get(\"norm\", False):\n r = r.normalized()\n return type(self)(r)", "def affine_map(self, W, b):\n self.base_vertices = np.dot(W, self.base_vertices) + b\n self.base_vectors = np.dot(W, self.base_vectors)", "def __lineartrans(self):\n do = self.domain\n self.transpoints = copy(self.pts)\n def t(x):\n return (x - do[0])/(do[1]-do[0])\n for i in range(len(self.transpoints)):\n self.transpoints[i,0] = t(self.transpoints[i,0])", "def transsp (arg):\r\n\r\n return s.translate(p.translate(arg))", "def translate(v: InputTensor) -> t.Tensor:\n result = util.to_tensor(v, dtype=t.float32)\n assert len(result.shape) >= 1\n dimensions = result.shape[-1]\n result = result[..., None, :].transpose(-1, -2)\n result = t.constant_pad_nd(result, [dimensions, 0, 0, 1])\n id_matrix = t.diag(result.new_ones([dimensions + 1]))\n id_matrix = id_matrix.expand_as(result)\n result = result + id_matrix\n return result", "def put_vector(self, term, vector):\n self.terms.append(term)\n self.vectors.append(vector.vector)\n self.real_vectors.append(vector)\n return self.dict.update({term: vector})", "def translate(self, target) -> Symbol:\n pass", "def translate(molecule,dx,dy,dz):\n for atom in get_atoms(molecule):\n translate_atom(atom,dx,dy,dz)\n return", "def translate(self, language=None):", "def vector_proj(v, w):\n w_hat = vector_hat(w)\n return vector_dot(v, w_hat) * w_hat", "def accelerate(self, vector):\n (self.angle, self.speed) = addVectors((self.angle, self.speed), vector)", "def translate(self,\n step_x: Scalar,\n step_y: Scalar) -> 'Multipoint[Scalar]':\n return self._context.translate_multipoint(self, step_x, step_y)", "def translateEuler(self,trans):\n return 
np.array([[1,0,0,trans[0]],[0,1,0,trans[1]],[0,0,1,trans[2]],[0,0,0,1]])", "def translate(self, tx : float, ty : float, tz : float):\n answ = self.clone()\n for i in range(len(self._elements)):\n answ._elements[i]._element = self._elements[i].element.translate(tx, ty, tz)\n\n return answ", "def pseudoinverse_vector(self, vector):\n return -vector", "def translations_to_projective_transforms(\n translations: TensorLike, name: Optional[str] = None\n) -> tf.Tensor:\n with tf.name_scope(name or \"translations_to_projective_transforms\"):\n translation_or_translations = tf.convert_to_tensor(\n translations, name=\"translations\", dtype=tf.dtypes.float32\n )\n if translation_or_translations.get_shape().ndims is None:\n raise TypeError(\"translation_or_translations rank must be statically known\")\n elif len(translation_or_translations.get_shape()) == 1:\n translations = translation_or_translations[None]\n elif len(translation_or_translations.get_shape()) == 2:\n translations = translation_or_translations\n else:\n raise TypeError(\"Translations should have rank 1 or 2.\")\n num_translations = tf.shape(translations)[0]\n # The translation matrix looks like:\n # [[1 0 -dx]\n # [0 1 -dy]\n # [0 0 1]]\n # where the last entry is implicit.\n # Translation matrices are always float32.\n return tf.concat(\n values=[\n tf.ones((num_translations, 1), tf.dtypes.float32),\n tf.zeros((num_translations, 1), tf.dtypes.float32),\n -translations[:, 0, None],\n tf.zeros((num_translations, 1), tf.dtypes.float32),\n tf.ones((num_translations, 1), tf.dtypes.float32),\n -translations[:, 1, None],\n tf.zeros((num_translations, 2), tf.dtypes.float32),\n ],\n axis=1,\n )", "def set_trans(self, head_mri_trans):\n x, y, z = -self.mri_origin[0]\n mri_tgt_trans = translation(x, y, z)\n head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)\n\n x, y, z = self.hsp.nasion[0]\n src_hsp_trans = translation(x, y, z)\n src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)\n\n rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])\n x, y, z = src_tgt_trans[:3, 3]\n\n self.rot_x = rot_x\n self.rot_y = rot_y\n self.rot_z = rot_z\n self.trans_x = x\n self.trans_y = y\n self.trans_z = z", "def x(self, value: Number):\n self._translation[0, 0] = value", "def setUnitVector(self, vector):\n self.angle = vector.angle" ]
[ "0.7729765", "0.7498974", "0.7481727", "0.7118544", "0.70570266", "0.6959108", "0.69552064", "0.692206", "0.68971455", "0.6878331", "0.68672305", "0.6747671", "0.6685557", "0.66658247", "0.6580302", "0.6569017", "0.65219915", "0.6508473", "0.6450745", "0.6443207", "0.6412782", "0.63318", "0.6326478", "0.63198113", "0.62746745", "0.61678016", "0.6153883", "0.615074", "0.6149528", "0.61344045", "0.60238665", "0.60126156", "0.5909199", "0.5901612", "0.58531743", "0.5827462", "0.5806589", "0.58026516", "0.57678384", "0.5691994", "0.5683234", "0.56192595", "0.56160533", "0.56095064", "0.5600191", "0.5589655", "0.5587329", "0.55653274", "0.55644166", "0.55511326", "0.5550688", "0.55358183", "0.5494299", "0.547429", "0.5472137", "0.54504883", "0.5445856", "0.54456437", "0.544416", "0.54139787", "0.540768", "0.5392054", "0.53748554", "0.53718203", "0.5370717", "0.5368157", "0.5361208", "0.5333043", "0.53323174", "0.5330654", "0.5329561", "0.5286078", "0.5278836", "0.5262457", "0.5262457", "0.521912", "0.520645", "0.52055746", "0.5205464", "0.51601565", "0.5143506", "0.51376545", "0.51353663", "0.51340455", "0.5108178", "0.51007575", "0.5099747", "0.50978893", "0.50862646", "0.50853443", "0.50849986", "0.5065784", "0.5054337", "0.5053717", "0.5027852", "0.50227463", "0.5022624", "0.5014396", "0.5009958", "0.50084025" ]
0.6492934
18
Scales this atom by the given scale value vector.
def scale(self, scale):
    self.coords = self.coords * scale
    return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale_vector(vector, scale):\n return vector[0] * scale, vector[1] * scale, vector[2] * scale", "def apply_scale( vectors, scale ):\n # create a scaling matrix\n matrix = numpy.array([\n [ scale[ 0 ], 0.0, 0.0 ],\n [ 0.0, scale[ 1 ], 0.0 ],\n [ 0.0, 0.0, scale[ 2 ] ]\n ])\n return numpy.dot( vectors, matrix )", "def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value", "def scale(self, scale):\n \n scale_matrix = wf.scaleMatrix(scale, self.width/2, self.height/2, 0)\n self.transform(scale_matrix)", "def scale(self, scale):\n\t\tself._current_score *= scale", "def scale_it(val):\n return scale(val, 0, 1, bpm_range[0], bpm_range[1])", "def _call_scale(vecObj, sc):\n res = vecObj.scale(sc)\n return res", "def scale(self, scale):\n\n self._scale = scale", "def scale(self, scale):\n\n self._scale = scale", "def scale(self, scale_x: float, scale_y: float) -> None:\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y", "def with_scale_op(self, scale):\n\t\tself.variables['scale'] = scale\n\t\treturn self", "def __mul__(self, scale):\n return Vec(self.x * scale, self.y * scale)", "def scale(self,s):\n return Vector(self.x * s, self.y * s, self.z * s)", "def scale(self, s):\n for n in range(len(self.mV)):\n self.mV[n] *= s\n return self", "def scale(s: (float, int), v: Vector) -> Vector:\n coords = list()\n res = Vector(coords)\n for i in range(len(v.coords)):\n res.coords[i] *= s\n return res", "def scale(self, up):\n s = 1.1 if up else 0.9\n self.scaling_matrix = np.dot(\n self.scaling_matrix,\n F.scaling([s, s, s])\n )\n\n self.aabb.scale(s)", "def __scale_constraint(c, v):\n if c.equality:\n c.set_value((c.lower * v, c.body * v))\n else:\n c.set_value(\n (__none_left_mult(c.lower, v), c.body * v, __none_left_mult(c.upper, v))\n )", "def scale(self, scale_factor: float) -> None:\n self.tensor[:, :3] *= scale_factor", "def scale(self, const):\n return Vector(*[self[i]*const for i in range(len(self))])", "def setScale(self, *args):\n return _libsbml.Unit_setScale(self, *args)", "def Scale(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Scale(*args, **kwargs)", "def scale(self, factor):\n self.b = factor * self.b", "def set_scale(self, scale):\n scale = float(scale)\n if scale <= 1:\n raise ValueError('The scale parameter must exceed 1.')\n self._a = scale", "def scale(self, sc):\n daskD.wait(self.client.map(_call_scale, self.vecDask, sc=sc, pure=False))\n return self", "def xscale(value):\n impl.xscale(**locals())", "def scale(self, scale=1):\n self.x *= scale\n self.y *= scale\n self.width *= scale\n self.height *= scale\n\n # Always update the corners after operation\n self.update_corners()\n return", "def scale(self, factor):\n for a in self.symbol_attributes:\n a.scale(factor)", "def scale(self, value):\n\t\toldscale = self.oldmax - self.oldmin\n\t\tnewscale = self.newmax - self.newmin\n\t\treturn (newscale * (value - self.oldmin) / oldscale) + self.newmin", "def apply_scale( vertices, scale=1.0 ):\n checkVerticesValidity( vertices )\n if type(scale) != float:\n raise ValueError\n \n for i in range(len(vertices)):\n v = vertices[i]\n tmpv = [v[0]*scale, v[1]*scale, v[2]*scale]\n vertices[i] = tmpv", "def update_axis_scale(self, scale, axis='left'):\n self.plt.getAxis(axis).setScale(scale=scale)", "def scale(self, axis, value):\r\n assert (axis < EventStream.numAxes), \"Axis number out of range\"\r\n if self.absInfo[axis]:\r\n return self.absInfo[axis].scale(value)\r\n else:\r\n return value", "def scale(self, sval: complex) -> 
None:\n self.coeff = self.coeff.astype(numpy.complex128) * sval", "def scale(self,factor):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y*factor for y in self.coord[x]])\n return self", "def scale(self, x, y, z) -> None:\n ...", "def scale_vector(vector, f):\n f = float(f)\n return [vector[0] * f, vector[1] * f, vector[2] * f]", "def scale(self, scale_factor: Union[float, Tuple[float, float]]):\n\n if isinstance(scale_factor, float):\n self.x *= scale_factor\n self.y *= scale_factor\n self.width *= scale_factor\n self.height *= scale_factor\n\n elif isinstance(scale_factor, tuple):\n scale_x, scale_y = scale_factor\n self.x *= scale_x\n self.y *= scale_y\n self.width *= scale_x\n self.height *= scale_y", "def scale(first,scalar):\n if isinstance(first,FreeCAD.Vector):\n return FreeCAD.Vector(first.x*scalar, first.y*scalar, first.z*scalar)", "def __scale_bboxes(self, bboxes, scale_x, scale_y):\n with tf.variable_scope('scale_bboxes'):\n return tf.multiply(bboxes, tf.tile([[scale_y, scale_x, scale_y,\n scale_x]],\n [tf.shape(bboxes)[0], 1]))", "def scale(self, sf):\n self.scale(sf, sf)", "def scale(self, other):\n return Vector(other * self.x, other * self.y)", "def scale(self, value):\r\n return (float(value)-float(self.minimum))/float(self.maximum-self.minimum)*2.0 - 1.0", "def scale(self, factor):\n new = self.copy()\n new.d.clear()\n\n for val, prob in self.items():\n new.set(val * factor, prob)\n return new", "def scale(self):", "def _adjust_scale(self, value):\n if self._min_val <= value <= self._max_val:\n self._scale_var.set(value)\n self.update_label_text()", "def scale(self,bvp):\n\n sol = bvp.solution\n # Additional aux entries for initial and terminal BCs\n extras = [{'type':'initial','vars':self.problem_data['state_list']},\n {'type':'terminal','vars':self.problem_data['state_list']}]\n\n # Scale the states and costates\n for idx,state in enumerate(self.problem_data['state_list']):\n sol.y[idx,:] /= self.scale_vals['states'][state]\n\n # Scale auxiliary variables\n for aux in (self.problem_data['aux_list']+extras):\n if aux['type'] not in Scaling.excluded_aux:\n for var in aux['vars']:\n sol.aux[aux['type']][var] /= self.scale_vals[aux['type']][var]\n\n # Scale parameters\n for idx, param in enumerate(self.problem_data['parameter_list']):\n sol.parameters[idx] /= self.scale_vals['parameters'][param]", "def set_scaling(self, scaling):\n self.scaling = scaling\n self.eff_box_size = int(self.box_size*self.scaling+0.5)", "def scale(self, scale_factor: Union[float, Tuple[float, float]]):\n\n if isinstance(scale_factor, float):\n self.width *= scale_factor\n self.height *= scale_factor\n elif isinstance(scale_factor, tuple):\n scale_x, scale_y = scale_factor\n self.width *= scale_x\n self.height *= scale_y", "def scale_vectors(vectors, f):\n return [scale_vector(vector, f) for vector in vectors]", "def scale(self, factor):\n return BSplineFunc(self.kvs, self.coeffs * factor)", "def scale(self, value):\n return (float(value) - float(self.minimum)) / \\\n float(self.maximum - self.minimum) * 2.0 - 1.0", "def scale_uniform(self, s: float):\n self.vertices = [v * s for v in self.vertices]\n return self", "def scale(self) -> Optional[pulumi.Input['ScaleArgs']]:\n return pulumi.get(self, \"scale\")", "def scaleAll(self, scale):\n center = [self.width/2, self.height/2, 0, 0]\n matrix = self.scaleMatrix(scale, scale, scale)\n\n for wireframe in self.wireframes.values():\n wireframe.scale(center, matrix)", "def scale(self, points, inplace=True):\n points = 
np.array(points).astype(float)\n if inplace==False:\n points = points.copy()\n # if len(points.shape) == 1:\n # points = points[None,:]\n # if len(points.shape) != 2:\n # logger.error(\"cannot scale array of dimensions\".format(len(points.shape)))\n points -= self.origin\n points /= self.scale_factor\n return points", "def scale(self, sx, sy):\n self._impl.scale(sx, sy)", "def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")", "def get_scaled_value(self, value):\r\n raise NotImplementedError()", "def scale(self):\n return self._a", "def edit_scale(scale, direction):\n if direction in (up, shift_up, plus):\n scale = scale*2\n elif direction in (down, shift_down, minus):\n scale = scale/2\n return scale", "def scale(c, scalar):\n return [c[0]*scalar, c[1]*scalar]", "def parallel_scale(self, value):\n self.camera.parallel_scale = value\n self.Modified()", "def scale(x, a=5, b=10, xmin=-1, xmax=1):\n return (b - a)*(x - xmin)/(xmax - xmin) + a", "def scale(self, alpha):\n\t\tc = SparseVector(self.d)\n\t\tfor i in self.st.keys():\n\t\t\tc.put(i, alpha*self.get(i))\n\t\treturn c", "def scale(self, factors):\n if isinstance(factors, numbers.Number):\n factors = np.ones(self.dim) * factors;\n self.raw_wires.scale(factors);", "def b_scale_object():\n \n bpy.ops.transform.resize(value=(7.5,1,1), constraint_axis=(True,False,False))\n bpy.ops.transform.resize(value=(1,7.5,1), constraint_axis=(False,True,False))\n bpy.ops.transform.resize(value=(1,1,3.5), constraint_axis=(False,False,True))\n bpy.ops.object.transform_apply(scale=True)", "def scale(self):\n return self._gev_bijector.scale", "def set_scale_control(self, scale_ctl=3):\n self._scale_ctl = scale_ctl", "def setScale(self, sx, sy=None, sz=None):\n self.transform.setScale(sx, sy, sz)", "def scale(self, factor):\n self.x *= factor\n self.y *= factor\n for a in self.annotations:\n a.scale(factor)", "def scale(self, size=128):\n scale_factor = size / max(self.voxels.shape)\n self.voxels = ndimage.zoom(self.voxels, scale_factor)\n self.point_position = self.point_position * scale_factor\n self.voxel_size = False # To ignore this\n \n return(self)", "def scale(requestContext, seriesList, factor):\n for series in seriesList:\n series.name = \"scale(%s,%g)\" % (series.name,float(factor))\n series.pathExpression = series.name\n for i,value in enumerate(series):\n series[i] = safeMul(value,factor)\n return seriesList", "def scale(v: float, a: float, b: float, c: float, d: float) -> float:\n v01 = (v - a) / (b - a)\n return c - v01 * (c - d)", "def scale(self):\n return self.distribution.scale", "def scale_uv(self):\n self.u = [i * self.scale * self.scaleratio for i in self.u]\n self.v = [i * self.scale for i in self.v]", "def scale(self,\n factor_x: Scalar,\n factor_y: Optional[Scalar] = None) -> 'Multipoint[Scalar]':\n return self._context.scale_multipoint(\n self, factor_x, factor_x if factor_y is None else factor_y\n )", "def _call_scaleAdd(vecObj, vec2, sc1, sc2):\n res = vecObj.scaleAdd(vec2, sc1, sc2)\n return res", "def scale(self, sx: float = 1, sy: float = 1, sz: float = 1,\n center: Onion[Iterable[Onion[float, int]], Point3D] = None) -> 'Component':\n scale = Matrix3D.create()\n translation = Matrix3D.create()\n if abs(sx) != abs(sy) or abs(sy) != abs(sz):\n raise ValueError(\"Non-uniform scaling is not currently supported\")\n\n if center is None:\n center_point = self._origin\n elif isinstance(center, Point3D):\n center_point = center\n elif isinstance(center, Point):\n center_point = center.point\n else:\n 
center_coordinates = list(center)[0:3]\n while len(center_coordinates) < 3:\n center_coordinates.append(0)\n center_point = Point3D.create(*center_coordinates)\n\n translation.translation = center_point.asVector()\n translation.invert()\n self._local_transform.transformBy(translation)\n\n scale.setCell(0, 0, sx)\n scale.setCell(1, 1, sy)\n scale.setCell(2, 2, sz)\n self._local_transform.transformBy(scale)\n\n translation.invert()\n self._local_transform.transformBy(translation)\n\n self._reset_cache()\n return self", "def scale_point(point, centroid, scale):\n point = np.asarray(point)\n centroid = centroid[:2]\n vector = ((point - centroid)*scale) + centroid\n return vector", "def yscale(value):\n impl.yscale(**locals())", "def scale(self,scale_by):\n x = self._x * scale_by\n y = self._y * scale_by\n return Point(x,y)", "def __set_constraint_transform_applied_scaling_factor(c, v):\n try:\n c.parent_block().constraint_transformed_scaling_factor[c] = v\n except AttributeError:\n c.parent_block().constraint_transformed_scaling_factor = pyo.Suffix(\n direction=pyo.Suffix.LOCAL\n )\n c.parent_block().constraint_transformed_scaling_factor[c] = v", "def scale(x, p=2, inplace=False):\n return x / np.linalg.norm(x, ord=p)", "def scale(self, fname, **kw):\n return self.scales.scale(fname, **kw)", "def scale(inp, ab):\n\n return inp * ab[0] + ab[1]", "def test_scale(self):\n # normal_vec, support_factor, scaling_factor\n orig = ([3, 4], 1, 1)\n normal_vec, support_factor, scaling_factor = orig\n # noinspection PyTypeChecker\n emb = ConceptEmbedding(normal_vec=normal_vec,\n support_factor=support_factor,\n scaling_factor=scaling_factor)\n\n # Scaling yields new instance and old one is not changed:\n assert emb is not emb.scale(), \"Scaling did not yield new instance\"\n assert np.allclose(emb.normal_vec, np.array(normal_vec))\n assert np.allclose(emb.support_factor, np.array(support_factor))\n assert np.allclose(emb.scaling_factor, np.array(scaling_factor))\n\n # Scaling embedding of scaling factor 1 does nothing\n scaled_unchanged_emb = emb.scale()\n assert np.allclose(emb.normal_vec, scaled_unchanged_emb.normal_vec)\n assert np.allclose(emb.support_factor,\n scaled_unchanged_emb.support_factor)\n assert np.allclose(emb.scaling_factor,\n scaled_unchanged_emb.scaling_factor)\n\n # Normalization and then scaling should yield the same embedding:\n backscaled_emb = emb.normalize().scale()\n for key, (expected, obtained) in \\\n {\"normal_vec\": (emb.normal_vec, backscaled_emb.normal_vec),\n \"support_factor\": (emb.support_factor,\n backscaled_emb.support_factor),\n \"scaling_factor\": (emb.scaling_factor,\n backscaled_emb.scaling_factor)\n }.items():\n assert np.allclose(obtained, expected), \\\n (\"Wrong normalized {}: expected {}, but was {}\"\n .format(key, expected, obtained))\n\n # Simple scaling example: scale by 2\n emb.scaling_factor = 2\n new = ([6, 8], 0.5, 1)\n normal_vec, support_factor, scaling_factor = new\n scaled_emb = emb.scale()\n assert np.allclose(normal_vec, scaled_emb.normal_vec)\n assert np.allclose(support_factor, scaled_emb.support_factor)\n assert np.allclose(scaling_factor, scaled_emb.scaling_factor)\n\n # Another scaling example: scale by -2\n emb.scaling_factor = -2\n new = ([-6, -8], -0.5, 1)\n normal_vec, support_factor, scaling_factor = new\n scaled_emb = emb.scale()\n assert np.allclose(normal_vec, scaled_emb.normal_vec)\n assert np.allclose(support_factor, scaled_emb.support_factor)\n assert np.allclose(scaling_factor, scaled_emb.scaling_factor)", "def scale(self, 
scale_factor: Union[float, Tuple[float, float]]):\n self.page.scale(scale_factor)\n for token in self.tokens:\n token.scale(scale_factor)", "def ScaleShape(shape, scale_x, scale_y):\n for i, pt in enumerate(shape.points):\n x, y = pt\n shape.points[i] = [scale_x * x, scale_y * y]", "def Scale(*args, **kwargs):\n return _gdi_.GraphicsContext_Scale(*args, **kwargs)", "def scale(c,v,p):\n scaleval = min([coeff.valuation(p) for coeff in c.coefficients()])\n if scaleval > 0:\n c = c/(p**scaleval)\n v = v - scaleval\n if v <= 0:\n flag = False\n else:\n flag = True\n return [flag,c,v]", "def scale(self):\n return self._scale", "def scale(self, k_x, k_y = None):\r\n if (k_y is None):\r\n return vec2(k_x*self.x, k_x*self.y)\r\n else:\r\n return vec2(k_x*self.x, k_y*self.y)", "def test_set_scale():\n data = io.create_sample_Dataset()\n tmp = data.piv.set_scale(1.0)\n assert np.allclose(tmp[\"x\"], data[\"x\"])\n\n tmp = data.copy()\n tmp.piv.set_scale(2.0)\n tmp_mean = tmp[\"u\"].mean(dim=(\"t\", \"x\", \"y\")).values\n data_mean = data[\"u\"].mean(dim=(\"t\", \"x\", \"y\")).values\n assert np.allclose(tmp_mean / data_mean, 2.0)", "def scale(self):\n return self.scale_factor / CONSTANTS.AU", "def getScaleValues(a, x):\n raise NotImplementedError('getScaleValues not implemented')", "def myscale(g, factor=1.0):\n g.setdata(factor * g.getdata())\n # if !g.frozen eq 0 then show", "def scale(self) -> Tuple[float, float]:\n return self._scale", "def scale(inp, ab):\n\n return inp * ab[0] + ab[1]\n # pass", "def scale(self, x: float, y: float):\n # Scaling is done from (0, 0), so that figure won't move after scaling\n dx = np.min(self.points[:, 0])\n dy = np.min(self.points[:, 1])\n self.points[:, 0] -= dx\n self.points[:, 1] -= dy\n\n matrix = np.array([\n [x, 0, 0],\n [0, y, 0],\n [0, 0, 1]\n ])\n\n self.points = self.points @ matrix\n self.points[:, 0] += dx\n self.points[:, 1] += dy\n\n self.points = self.points.astype(int)\n self.start_points = self.points\n self.center_point = self.midpoint()\n self.recalculate_pivots()", "def scale(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"scale\")", "def scale(self, column_name, factor):\n self.check_for_column(column_name)\n self.data[column_name] *= factor" ]
[ "0.7549334", "0.7472794", "0.74334973", "0.7333459", "0.728429", "0.719025", "0.7162344", "0.71475494", "0.71475494", "0.7143308", "0.7092872", "0.7072481", "0.70674723", "0.7027254", "0.69800884", "0.6965245", "0.6939701", "0.69374204", "0.6909835", "0.68787146", "0.6864351", "0.6862714", "0.6858827", "0.6804057", "0.6799594", "0.6767316", "0.6759552", "0.67210805", "0.6694325", "0.6691579", "0.66352516", "0.66269", "0.6619138", "0.66049695", "0.6568666", "0.65608203", "0.6558872", "0.6524701", "0.65091014", "0.64784783", "0.6456094", "0.6447812", "0.64475584", "0.64438045", "0.6427445", "0.6426862", "0.6419047", "0.64125687", "0.63978845", "0.63933235", "0.6392901", "0.63824254", "0.63450587", "0.6338759", "0.63240856", "0.6318905", "0.62763506", "0.62751263", "0.626526", "0.6263949", "0.6263565", "0.62602556", "0.6252271", "0.62504625", "0.6232302", "0.6230289", "0.6200883", "0.61872", "0.6175073", "0.61674035", "0.61561066", "0.6141254", "0.6136775", "0.6127835", "0.61262363", "0.61250293", "0.6123683", "0.6122844", "0.61200106", "0.6118127", "0.6110673", "0.6099953", "0.60979813", "0.6095967", "0.6071524", "0.6062927", "0.60580283", "0.6057651", "0.6054279", "0.6053917", "0.6035793", "0.60116124", "0.6002716", "0.5997613", "0.59970605", "0.5995501", "0.5985456", "0.59801036", "0.5968803", "0.5962336" ]
0.73805004
3
Gets the symbol of this atom. Returns
def get_symbol(self): return self.symbol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def symbol(self):\n return self._symbol", "def symbol(self): \n return self.__symbol", "def symbol_id(self) -> str:\n return self._symbol", "def getSymbol(self):\n return _libsbml.InitialAssignment_getSymbol(self)", "def getElementSymbol(self):\n dataDict = self.__dict__\n yy = self\n while yy is not None:\n xx = yy\n yy = xx.findFirstChemAtomSet()\n \n result = xx.findFirstChemAtom().elementSymbol\n return result", "def symbol(self) -> str:\n return self.current_token", "def symbol(self) -> Optional[str]:\n if self._is_a() or self._is_label():\n return self._cur().split(\"@\")[1]", "def get_display_symbol(self):\n return self.symbol if self.display_symbol is None else self.display_symbol", "def get_display_symbol(self):\n return self.symbol if self.display_symbol is None else self.display_symbol", "def atomic_symbol(self) -> str:\n return self.label", "def getAiSymbol(self) -> str:\n return self.ai.getSymbol()", "def obj(self) -> str:\n return self._symbol", "def obj(self) -> str:\n return self._symbol", "def atomic_symbol(self, atomic_number):\n return self.GetSymbol(atomic_number)", "def getPlayerSymbol(self) -> str:\n return self.player.getSymbol()", "def symbol(self, **kw):\n if not kw:\n raise ValueError(u\"'symbol' needs keyword arguments\")\n res = self.find_symbols(**kw)\n if len(res)==1:\n return res[0]\n else:\n return res", "def read_symbol(self) -> str:\n return self.tape[self.current_position]", "def symbolic_name(self):\n return self._symbolic_name", "def atom(self):\n\n result = self.current_char\n pos = self.pos\n self.next()\n\n if self.current_char is not None and self.current_char.isalpha():\n nresult = result + self.current_char\n if nresult in TOT_SYMBOLS:\n self.next()\n return nresult\n\n if result in TOT_SYMBOLS:\n return result\n else:\n raise LexerException(pos, '{} is not a valid atomic symbol'.format(result))", "def get_symbol(operator):\r\n if isinstance(operator, AST):\r\n operator = type(operator)\r\n try:\r\n return ALL_SYMBOLS[operator]\r\n except KeyError:\r\n raise LookupError('no known symbol for %r' % operator)", "def get_address(self, symbol):\n return self.table[symbol]", "def address(self, symbol):\r\n return self.s_table[symbol]", "def symbol(self):\n if self.content is None:\n return \" \"\n else:\n return self.content.symbol", "def _GetSymbol(atom):\n ks = atom.keys()\n if 'sym' in ks:\n return atom['sym']\n\n for k in ks:\n if k not in PROTECTED_KEYS and isinstance(atom[k], list):\n if len(atom[k]) == 3:\n return k\n\n raise ValueError", "def getSymbolAt(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.symbol.Symbol:\n ...", "def getSymbolValue(self) -> int:\n ...", "def getMibSymbol(self):\n if self.__state & self.stClean:\n return self.__modName, self.__symName, self.__indices\n else:\n raise SmiError('%s object not fully initialized' % self.__class__.__name__)", "def getSymbol(self, name: unicode, namespace: ghidra.program.model.symbol.Namespace) -> ghidra.program.model.symbol.Symbol:\n ...", "def symbol(self):\n if self.currCommType is A_COMMAND:\n return self.currentCommand[COMMAND_START:].strip()\n\n elif self.currCommType is L_COMMAND:\n return self.currentCommand[COMMAND_START:COMMAND_ENDS].strip()", "def next_symbol(self):\r\n try:\r\n return self.rule.rightside[self.position]\r\n except IndexError:\r\n return None", "def get_token(self, name: str) -> Optional[BuiltinTypeSymbol]:\n\n symbol = self._symbols.get(name)\n return symbol", "def get_symbol(self):\n symbol = Symbol(None, None)\n 
self.skip_spaces()\n\n # Find the type of the file and gives an ID to relevant symbols\n\n if self.current_character.isalpha():\n name_string = self.get_name()\n if name_string in self.keywords_list:\n symbol.type = self.KEYWORD\n else:\n symbol.type = self.NAME\n # if symbol is a name, add name to names list and get ID\n [symbol.id] = self.names.lookup([name_string])\n symbol.value = name_string\n\n elif self.current_character.isdigit():\n symbol.type = self.NUMBER\n # Assign actual number as symbol ID if symbol type is a number\n symbol.value = self.get_number()\n symbol.id = int(symbol.value)\n\n elif self.current_character == \"/\":\n '''Checking for comment by looking for /. Can either be a block comment\n or a line comment so look at the next character as well for * which signals\n a block comment'''\n self.current_character = self.file.read(1)\n if self.current_character == \"*\":\n '''if a block comment is received, get next character, then go into\n block comment method and then immediately perform checks on current character'''\n self.ccurrent_character = self.file.read(1)\n self.skip_comments_block()\n\n else:\n '''if a line comment then current character already relevant comment line\n Perform checks immediately on current character'''\n self.skip_comments_line()\n '''Once exits from comment, self call get_symbol to get the next immediate symbol\n after the comment along with relevant attributes and return it directly'''\n symbol = self.get_symbol()\n return symbol\n\n # Check punctuations\n elif self.current_character == \"=\":\n symbol.type = self.EQUALS\n symbol.value = \"=\"\n\n elif self.current_character == \",\":\n symbol.type = self.COMMA\n symbol.value = \",\"\n\n elif self.current_character == \";\":\n symbol.type = self.SEMICOLON\n symbol.value = \";\"\n\n elif self.current_character == \".\":\n symbol.type = self.PERIOD\n symbol.value = \".\"\n\n elif self.current_character == \"-\":\n self.current_character = self.file.read(1)\n if self.current_character == \">\":\n symbol.type = self.ARROW\n symbol.value = \"->\"\n else:\n symbol.type = self.INVALID\n symbol.value = 'invalid'\n\n elif self.current_character == \"\":\n symbol.type = self.EOF\n symbol.value = \"end\"\n\n else:\n symbol.type = self.INVALID\n symbol.value = 'invalid'\n\n symbol.prev_pos = self.prev_pos\n symbol.position = self.file.tell()\n symbol.line = self.line + 1\n symbol.line_pos = symbol.position - symbol.prev_pos\n\n return symbol", "def get_symbol_by_index(self, index):\n return self[self._index[index]]", "def get_symbol(self, name): # pylint: disable=no-self-use,unused-argument\n if name in self._symbol_cache:\n return self._symbol_cache[name]\n return None", "def get(self, symbol):\n if symbol not in self.symbol_map:\n self.symbol_map[symbol] = self.symbol_counter\n self.symbol_counter += 1\n return self.symbol_map[symbol]", "def _symbol(self,s):\n return self.symbollist[s%len(self.symbollist)]", "def get_token(self, symbol):\r\n for token in self:\r\n if token[\"symbol\"].lower() == symbol.lower():\r\n return token\r\n return None", "def getElementSymbol(self):\n dataDict = self.__dict__\n result = None\n return result", "def get_symbol(symbol):\n st = Stock(symbol)\n return st.get_quote()", "def findSymbol(self, exp):\n k = str(exp)\n try:\n return self.currSyms[k]\n except KeyError:\n raise SymbolNotFound('Identifier not found:<%s>' % (k))", "def _symbol(self):\n if self._symbol_cycle is None:\n self._symbol_cycle = itertools.cycle([WarmFrontolysis._symbol,\n ColdFrontolysis._symbol])\n 
return next(self._symbol_cycle)", "def _symbol(self):\n if self._symbol_cycle is None:\n self._symbol_cycle = itertools.cycle([WarmFront._symbol, ColdFront._symbol])\n return next(self._symbol_cycle)", "def start_symbol(self) -> Variable:\n return self._start_symbol", "def getSymbolAt(self, address: ghidra.program.model.address.Address, name: unicode) -> ghidra.program.model.symbol.Symbol:\n ...", "def get_symbol_value(self, obj, name):\n # Lookup symbol:\n if obj.has_symbol(name):\n return obj.get_symbol_value(name)\n elif name in self.extra_symbols:\n return self.extra_symbols[name]\n else:\n raise CompilerError(\n 'Undefined reference \"{}\"'.format(name))", "def get_current_symb(self):\n return self.symb_val[-1]", "def _symbol(self):\n if self._symbol_cycle is None:\n self._symbol_cycle = itertools.cycle([WarmFrontogenesis._symbol,\n ColdFrontogenesis._symbol])\n return next(self._symbol_cycle)", "def find_symbol(self, op):\n for ii in self.__symbols:\n if ii.get_name() == op:\n return ii\n return None", "def m_symb(self):\n return self._m_symb", "def get_symbol(self):\n return []", "def find_symbol(self) -> str:\n pattern = struct.pack(\"<HBBBBHQ\", self.event_id, self.version, self.channel, self.level, self.opcode, self.task, self.keyword)\n for start, end in find_segment(self._bv, \".rentries\"):\n offset = self._bv.read(start, end - start).find(pattern)\n if offset == -1:\n continue\n\n symbol = self._bv.get_symbol_at(start + offset)\n if symbol is None:\n continue\n \n return symbol.name\n\n return None", "def symbol_table(self) -> str:\n return self._symbol_table", "def getSymbolAt(self, address: ghidra.program.model.address.Address, name: unicode, namespace: ghidra.program.model.symbol.Namespace) -> ghidra.program.model.symbol.Symbol:\n ...", "def getSymbolAfter(self, symbol: ghidra.program.model.symbol.Symbol) -> ghidra.program.model.symbol.Symbol:\n ...", "def get_atom_code(self, atom):\n for code, symbol in self.__symbols_dict.items():\n # if keyword, return associated code\n if symbol == atom:\n return code\n\n if self.check_if_var(atom):\n # if identifier, return 0\n return 0\n if self.check_if_const(atom):\n # if constant, return 1\n return 1\n\n # invalid atom\n return -1", "def getFunctionSymbol(self, symbol):\n if ( symbol in list(self.dict.keys()) and self.dict[symbol][\"type\"] == \"func\"):\n return self.dict[symbol]\n return None", "def OMSymbol(self, module, name):\n return om.OMSymbol(cdbase=self._cdbase, cd=module, name=name)", "def get_char(self) -> str:\n return self._char", "def get_display_symbol(self):\n if not hasattr(self, \"_cached_display_symbol\"):\n legend = self.xymap.legend\n default_symbol = self.symbol if self.display_symbol is None else self.display_symbol\n self._cached_display_symbol = default_symbol\n\n dirtuple = tuple((key, self.directions[key]) for key in sorted(self.directions.keys()))\n\n replacement_symbol = self.display_symbol_aliases.get(dirtuple, default_symbol)\n\n if replacement_symbol != self.symbol:\n node_or_link_class = legend.get(replacement_symbol)\n if node_or_link_class:\n # initiate class in the current location and run get_display_symbol\n # to get what it would show.\n self._cached_display_symbol = node_or_link_class(\n self.x, self.y, self.Z\n ).get_display_symbol()\n return self._cached_display_symbol", "def get_stack_symbol_from(self, stack_symbol):\n if isinstance(stack_symbol, cfg.Epsilon):\n return pda.Epsilon()\n if self._inverse_stack_symbol[stack_symbol] is None:\n value = str(stack_symbol.value)\n if 
isinstance(stack_symbol, cfg.Terminal):\n value = \"#TERM#\" + value\n temp = pda.StackSymbol(value)\n self._inverse_stack_symbol[stack_symbol] = temp\n return temp\n return self._inverse_stack_symbol[stack_symbol]", "def decode_symbol(self, bits):\n return self.bits_to_symbol.get(bits)", "def get_symbol(self, entrez_id):\n\n try:\n entrez_id = int(entrez_id)\n except ValueError:\n raise ValueError(\"entrez_id must be an integer\")\n\n\n self.cursor.execute(\"\"\"\n SELECT symbol\n FROM genes\n WHERE entrez_id = %(eid)s\"\"\", {'eid': entrez_id})\n row = self.cursor.fetchone()\n if row is not None:\n return row[0]\n raise KeyError(\"Entrez ID %d was not found in the database\" % entrez_id)", "def _get_address(self, symbol):\n if symbol.isdigit():\n \n return symbol\n else:\n if not self.symbols_table.contains(symbol):\n self.symbols_table.add_entry(symbol, self.symbol_address)\n self.symbol_address += 1\n \n return self.symbols_table.get_address(symbol)", "def getElementName(self):\n return _libsbml.GeneralGlyph_getElementName(self)", "def getSymbolAfter(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.symbol.Symbol:\n ...", "def _sym_constant(self, table: Mapping[int, str]) -> str:\n try:\n return table[self.sym]\n except KeyError:\n return str(self.sym)", "def get_initial_symb(self):\n return self.symb_val[0]", "def getElementName(self):\n return _libsbml.ReactionGlyph_getElementName(self)", "def getElementName(self):\n return _libsbml.ReferenceGlyph_getElementName(self)", "def getElementName(self):\n return _libsbml.TextGlyph_getElementName(self)", "def ambient(self):\n return self._sym", "def atom(self, atom_name, resnum, chain_id, icode=' ', alt=' ', model_num=0):\n return self.struct[model_num][chain_id][(alt, resnum, icode)][atom_name]", "def getElementName(self):\n return _libsbml.SpeciesGlyph_getElementName(self)", "def getElementName(self):\n return _libsbml.CompartmentGlyph_getElementName(self)", "def mnemonic(self):\n return self._mnemonic", "def get_symbol(self, name):\n if not self.ksymtab_initialized:\n self._init_ksymtab()\n for match in re.finditer('{0}\\0'.format(name), self.kernel_image[self.ksymtab_strings_offset:]):\n symbol_str_offset = self.ksymtab_strings_offset + match.start()\n if re.match(r'[0-9a-z_]', self.kernel_image[symbol_str_offset - 1:symbol_str_offset]):\n # Symbol string is a substring of another symbol string,\n # e.g. 
'use_mm' is a substring of 'unuse_mm'.\n continue\n debug.debug(\"Found the physical offset of the symbol string \"\n \"'{0}': {1:#010x}\".format(name, symbol_str_offset))\n symbol_str_vaddr = symbol_str_offset + self.page_offset\n symbol_str_vaddr_little = pack('<L', symbol_str_vaddr)\n # TODO: save ksymtab_offset in the object variable\n ksymtab_offset = max(0, symbol_str_offset - KSYMTAB_MAX_SIZE) >> 2 << 2 # align to x4\n ksymtab_data = self.kernel_image[ksymtab_offset:ksymtab_offset + KSYMTAB_MAX_SIZE]\n for match in re.finditer(symbol_str_vaddr_little.encode('hex'), ksymtab_data.encode('hex')):\n ksymtab_entry_offset = ksymtab_offset + match.start() / 2 - 4\n symbol_vaddr, = unpack('<L', self.kernel_image[ksymtab_entry_offset:ksymtab_entry_offset + 4])\n debug.debug(\"Requested kernel symbol '{0}' found: {1:#010x}\".format(name, symbol_vaddr))\n return symbol_vaddr\n debug.debug(\"Requested kernel symbol '{0}' not found\".format(name))\n return None", "def _get_symbolic_symbol(segment):\n\n bytecode_dictionary = {\n \"local\": \"LCL\",\n \"argument\": \"ARG\",\n \"this\": \"THIS\",\n \"that\": \"THAT\",\n }\n\n try:\n return bytecode_dictionary[segment]\n except: # If the segment is not available, it is most likely a variable, so just return it\n return segment", "def atomic_number2element_symbol(atomic_number):\n return ATOMIC_NUMBER2SYMBOL[atomic_number]", "def char(self):\n return self._char", "def getName(self):\n return _libsbml.XMLToken_getName(self)", "def getCharacter(self):\n return _libsbml.ASTNode_getCharacter(self)", "def getElementName(self):\n return _libsbml.SpeciesReferenceGlyph_getElementName(self)", "def getSpaceGroup(self):\n sg = self.stru.space_group()\n t = sg.type()\n return t.lookup_symbol()", "def next(self):\n symbol = None\n while symbol is None:\n if self.index == len(self.brainfuck_code):\n return Symbol.PROGRAM_END\n symbol = self.symbol_dict.get(self.brainfuck_code[self.index])\n self.index += 1\n\n return symbol", "def parameter_symbol(self) -> str:\n return self._parameter_symbol", "def token(self):\n\n return self.__token", "def getAtomName(self, iAtom):\n atomNames = self.getAtomNames()\n return atomNames[iAtom]", "def symbolic_start(self):\n return self.symbolic_bounds[0]", "def getTypeCode(self):\n return _libsbml.GeneralGlyph_getTypeCode(self)", "def get_symbolic_model(self):\n return self.sym_func", "def get_symbolic_model(self):\n return self.sym_func", "def getGlyphId(self):\n return _libsbml.ReferenceGlyph_getGlyphId(self)", "def charname(self):\n return self._charname", "def ord(self):\n return self.value", "def ionic_symbol(self) -> str:\n return self.ion.ionic_symbol", "def getBitsPerSymbol(self):\n \n return self.bits_per_symbol", "def token(self):\n return self._token", "def token(self):\n return self._token", "def token(self):\n return self._token", "def getName(self):\n return _libsbml.InSpeciesTypeBond_getName(self)" ]
[ "0.85209036", "0.8121533", "0.76356006", "0.75384325", "0.73964155", "0.7318919", "0.73038465", "0.7173422", "0.7173422", "0.70371056", "0.69875854", "0.6953391", "0.6953391", "0.6947048", "0.6933806", "0.6931096", "0.68797255", "0.68131906", "0.6801363", "0.6762182", "0.6737989", "0.67216915", "0.6715377", "0.67109805", "0.6690734", "0.66814363", "0.66635394", "0.66204065", "0.66015756", "0.65572214", "0.6538146", "0.65303326", "0.6527447", "0.6505107", "0.6455507", "0.645254", "0.64338064", "0.64245915", "0.6423378", "0.640921", "0.6404937", "0.63837135", "0.63762695", "0.63452417", "0.6311651", "0.6310803", "0.6265548", "0.62552094", "0.6252321", "0.6209389", "0.6186079", "0.61151373", "0.6101171", "0.60822725", "0.6081482", "0.6070125", "0.6049222", "0.6017633", "0.60120577", "0.60005933", "0.5990783", "0.5962333", "0.5958033", "0.59502673", "0.593312", "0.59172523", "0.5901935", "0.5866581", "0.58542305", "0.5771251", "0.5737746", "0.57255816", "0.5716317", "0.57136816", "0.5712028", "0.5700258", "0.5696702", "0.5659826", "0.5655646", "0.5652276", "0.5650716", "0.56447893", "0.5602625", "0.5584891", "0.5566761", "0.5558901", "0.55340034", "0.552649", "0.55202776", "0.55202115", "0.55202115", "0.55151075", "0.55145335", "0.5512807", "0.55080795", "0.55008465", "0.5499653", "0.5499653", "0.5499653", "0.5497798" ]
0.85047024
1
Gets the label of this atom. Returns
def get_label(self): return self.label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_label(self):\n\n return self._label", "def get_label(self):\n return self._label", "def GetLabel(self):\r\n\r\n return self._label", "def get_label(self, ):\n return self.attrs.get(self.AttributeNames.LABEL, None)", "def GetLabel(self) -> str:\n return self._label", "def GetLabel(self):\n \n return self.label_str", "def label(self):\n return self._label_", "def get_label ( self ):\n return self.label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def GetLabel(self):\r\n\r\n return self.label", "def GetLabel(self):\r\n\r\n return self.label", "def label(self):\r\n return self._label", "def label(self) -> str:\n return self._underlying.label", "def label(self):\n return self.__label", "def label(self):\n return self.__label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self):\n return self.label_", "def _get_label(self):\n return self.label", "def get_label(self):\n return self.job[self.label_key]", "def label(self) -> str:\n return self[\"label\"]", "def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")", "def label(self):\r\n return self._name", "def label(self):\n # type: () -> str\n labels = self.__class__.__labels__\n return force_str(labels.get(self.value, self.name))", "def label(self):\n\n return self.identifier", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return self.name", "def label(self) -> str:\r\n\r\n return self.__label", "def label(self):\r\n return self._text", "def to_label(self):\n return self.label", "def get_name(self):\n return self._label", "def get_label(cls):\n return cls._type_name(cls.label)", "def get_label(cls):\r\n return cls._type_name(cls.label)", "def label_name(self) -> str:\n return pulumi.get(self, \"label_name\")", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def get_label(cls) -> str:\n return 
cls._meta.label_lower.split('.')[-1]", "def label(self) -> str:\n return self.__parameters.label", "def label(self) -> str:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"label\"))\r\n return self._name", "def label(self):\n return self.address.label", "def get_label_name(self):\n command_type = self.get_type()\n if command_type == LABEL_COMMAND_TYPE:\n return self.command[1:-1] # ignores the () at the beginning and the end\n if command_type != EMPTY_COMMAND_TYPE: # increments the line number if it is not a blank line or a label\n self.__line_number += 1", "def get_label(self, key):\n return self.labels.get(key, None)", "def getLabel(self):\n return self.content[:12]", "def getLabel(self):\n if self.__state & self.stClean:\n return self.__label\n else:\n raise SmiError('%s object not fully initialized' % self.__class__.__name__)", "def get_labelname(self):\n return self.options['labelname']", "def label(self):\n return self._label_shape", "def get_label(self):\n return ThreeCardPokerHand.all_labels[self.rank]", "def getLabel(self):\n return _libsbml.GeneProduct_getLabel(self)", "def get_label(self):\n oshape = (ctypes.c_uint * 2)()\n ostride = ctypes.c_uint()\n ret = cxnlib.CXNIOGetLabel(self.handle,\n oshape, ctypes.byref(ostride))\n return ctypes2numpyT(ret, [x for x in oshape], 'float32', ostride.value)", "def label_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label_name\")", "def label(self) -> Optional[str]:\n return self._itempage.labels.get(\"en\", None)", "def GetBitmapLabel(self):\n\n return self.bmpLabel", "def getLabel(self):\n result = self.content[:12]\n if result == \"\":\n if self.tags:\n result = str(self.tags.first)\n return result", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return user_name_for( self.name )", "def get_label(self):\n return openmediavault.string.unescape_blank(\n self.get_udev_property('ID_FS_LABEL_ENC', '')\n )", "def Label(self, default=None):\n return self.data.get('label', default)", "def Label(self, default=None):\n return self.data.get('label', default)", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return 'Column %d' % (self.index + 1)", "def label(self) -> str:\n return self.keysym.label", "def get_label(self):\n\n auth = self.authorizations[0]\n return auth.label", "def getLabelColumn(self):\n return self.getOrDefault(self.labelColumn)", "def gt_label(self):\n return self._gt_label", "def label_index(self):\n return self._label_index", "def label_index(self):\n return self._label_index", "def entity_label(self, eid):\n entities = self._load_entities()\n return entities[eid][\"label\"]", "def getMetaLabel(self, idx):\n return self.label_dict[idx].decode(\"utf-8\")", "def first_label(self):\n if self.labels:\n return self.labels[0]\n else:\n return None", "def get_label_id(self):\n\n return None if self.properties is None else self.properties.get_label_id()", "def relation_label(self, rid):\n relations = self._load_relations()\n return relations[rid][\"label\"]", "def get_current_label(self):\n if self.gt_helper_open:\n return self.gt_helper.get_current_label()\n else:\n return -1", "def getLabel( self, cCtrlName ):\n return self.getControlModelProperty( cCtrlName, \"Label\" )", "def __str__ ( self ):\n return self.get_label()", "def first_label(self):\r\n return self.labels.split(',')[0]" ]
[ "0.844582", "0.843851", "0.8382816", "0.8357652", "0.8327101", "0.8326299", "0.82825094", "0.8282123", "0.82630473", "0.82630473", "0.82630473", "0.82630473", "0.82630473", "0.82630473", "0.82630473", "0.82630473", "0.82630473", "0.82630473", "0.8249067", "0.8249067", "0.8157331", "0.81354845", "0.81063503", "0.81063503", "0.80665565", "0.80665565", "0.80665565", "0.80665565", "0.80665565", "0.80665565", "0.80665565", "0.80525154", "0.80474436", "0.79154134", "0.79052645", "0.7799562", "0.7799562", "0.7799562", "0.779495", "0.7740449", "0.77167153", "0.7703066", "0.76906705", "0.760619", "0.7589734", "0.7548933", "0.7548677", "0.752636", "0.7464254", "0.7424723", "0.7424723", "0.7424723", "0.7424723", "0.73804414", "0.73804414", "0.73804414", "0.73804414", "0.73804414", "0.73804414", "0.73804414", "0.73441654", "0.7324494", "0.7281043", "0.7277859", "0.72094506", "0.72024894", "0.719178", "0.71909755", "0.7159778", "0.71491134", "0.7091169", "0.70905375", "0.7086921", "0.7072584", "0.7038854", "0.70312726", "0.7007673", "0.7001557", "0.6983972", "0.6970553", "0.6970553", "0.6935029", "0.6932552", "0.6929889", "0.6915993", "0.69045323", "0.6877633", "0.6877633", "0.6850235", "0.68497336", "0.68411094", "0.683369", "0.6830571", "0.6808082", "0.6781498", "0.677258", "0.67721367" ]
0.8260186
19
Gets the chain sequence number of the amminoacid this atom belongs to. Returns
def get_ammino_chain_seq(self): return self.ammino_chain_seq
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sequence_number(self):\n return self._sequence_number", "def sequence_number(self):\n # type: () -> int\n return self._sequence_number", "def attempt_sequence_number(self):\n return self._attempt_sequence_number", "def chain_serial(self):\n return self.structure.chain_serial[self.mask]", "def __get_sequence_number(self):\n if self.counter > 999:\n self.counter = 0\n else:\n self.counter += 1\n\n str_sequence_num = self.counter + 256\n str_hex_sequence_num = hex(str_sequence_num)[2:]\n return str_hex_sequence_num", "def get_sequence(self):\n self.__sequence = self.__sequence + 1\n return self.__sequence - 1", "def sequence(self):\n\n\t\tseq = \"\"\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tseq += res.aa1()\n\n\t\treturn seq", "def get_atomic_number(self):\n\n return self._atomic_number", "def getResidueNumber(self, iAtom):\n return self._getResiduePointer(iAtom)+1", "def sequence(self):\n return self._sequence", "def sequence(self):\n return self._sequence", "def get_sequence(self, chain_id, model_num = 0):\n if self.get_chain_length(chain_id, model_num) == 0:\n return \"\"\n\n seq = \"\"\n for res in self.residues(chain_id, model_num):\n aa = self.res_definitions.get_one_letter_from_three(res.resname)\n if not aa:\n print \"Setting NCAA as X: \"+res.resname\n print \"This could pose a problem!\"\n seq = seq+'X'\n continue\n\n seq = seq+aa\n return seq", "def residueNumber(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n return freesasa_structure_atom_res_number(self._c_structure,i)", "def casenumber(self) :\n\t\ttry :\n\t\t\treturn self._casenumber\n\t\texcept Exception as e:\n\t\t\traise e", "def get_seq_from_pdbchain(chain):\n type_chain = check_type(chain)\n if type_chain == \"protein\":\n three_res_list = []\n for res in chain:\n residues_atoms = res.get_atoms()\n for atom in residues_atoms:\n if atom.get_name() == 'CA':\n residue = atom.get_parent()\n three_res_list.append(residue.get_resname())\n return three_to_one(three_res_list) # three_to_one function\n else:\n nucleic_acid_res = []\n for res in chain:\n residues_atoms = res.get_atoms()\n for atom in residues_atoms:\n if atom.get_name() == 'P':\n residue = atom.get_parent()\n nucleic_acid_res.append(residue.get_resname())\n nucleic_acid_seq = [x[2] for x in nucleic_acid_res]\n return \"\".join(nucleic_acid_seq)", "def sequence_number(self):\n return self._annotations.get(EventData.PROP_SEQ_NUMBER, None)", "def sequence (self):\n seq_av_at = \"%s:__seq__\" % (self.classkey)\n seq = r.incr (seq_av_at)\n return seq", "def _get_next_sequence_number(self):\n cur = self._next_sequence_number\n self._next_sequence_number += 1\n return cur", "def get_sequence(self, ID):\n try: \n record = self.database[ID]\n except KeyError:\n return '-1'\n sequence = record[\"sequence\"]\n return sequence", "def seq(self):\n return self.__seq", "def get_sequence_index(self):\n\t\treturn call_sdk_function('PrlBootDev_GetSequenceIndex', self.handle)", "def seq(self):\n\t\tif self._record is not None:\n\t\t return self._record.seq\n\t\telse:\n\t\t return None", "def match_seq_num(self):\n return self._get(\"match_seq_num\")", "def sequence(self):\n return self[23]", "def get_atomic_number(molecule, atom_index):\n return molecule.GetAtomAtomicNumber(atom_index)", "def sequence_length(self):\n return self.get_sequence_length()", "def sequence_length(self):\n return self._sequence_length", "def count_amino_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += 
chain.count_amino_acids()\n return n", "def nseqs(self):\n return libhts.faidx_nseq(self._fai)", "def readSeq(self):\n # clear buffer in case of errors\n self.flushInput()\n\n if (self.model == 'TDS'):\n self.write('ACQuire:NUMACq?\\n')\n return int(self.readline())\n\n # clear buffer in case of errors\n self.flushInput()", "def get_chain(self, chain):\n if not self.atom_section:\n self.read_atoms_section()\n chain_lines = []\n for at_line in self.atom_section:\n if at_line[21:22] == chain:\n chain_lines.append(at_line)\n return \"\".join(chain_lines)", "def alignment_index(self, sequence_index):\n if sequence_index >= len(self.ungapped()):\n raise IndexError(\"sequence index out of range\")\n sequence_index %= len(self.ungapped())\n iCurrent = -1\n for i, sResidue in enumerate(self.sequence):\n if sResidue not in GAP_CHARACTERS:\n iCurrent += 1\n if iCurrent == sequence_index:\n return i", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def serial_number(self) -> str:\n return self.mac_address", "def last_sequence_ind(self,):\n return self.last_sequence_ind_", "def get_num_sequence(self):\n return len(self.study_list)", "def serial_number(self):\n return self._serialno", "def chain_offset(self):\n return self._chain_offset", "def getSequence(self):\n if self.sequence != None: # a sequence has been assigned\n return self.sequence\n elif self.seqscores != None: # inferred by parsimony but not yet assigned\n return None # determine most parsimonous sequence, not yet implemented", "def serial_num(self) -> int:\n return self._serial_num", "def transport_call_sequence_number(self) -> TransportCallSequenceNumber:\n return self._transport_call_sequence_number", "def tracenb(self):\n trace_nb = self._pna.query('CALC{}:PAR:MNUM?'.format(self._channel))\n if trace_nb:\n return int(trace_nb)\n else:\n raise InstrIOError(cleandoc('''Agilent PNA did not return the\n trace number on channel {} '''.format(self._channel)))", "def getCRLNumber(self):\n\n return self.get_POW().getCRLNumber()", "def getAcNum(self):\n\n # stores the integer account number as a formatted 3-digit string (in which 0's occupy unused digits)\n strAcNum = str(\"{self.acNum:03d}\".format(self=self))\n return strAcNum", "def sequence_number(self, seq_type):\n self.transaction_start()\n try:\n seq = super(SamDB, self).sequence_number(seq_type)\n except:\n self.transaction_cancel()\n raise\n else:\n self.transaction_commit()\n return seq", "def cod_id(self) -> int:\n return self._cod_id", "def line_no(self):\n return self._line_no", "def serial_number(self) -> str:\n return pulumi.get(self, \"serial_number\")", "def get_chain(self):\n return 
self.segment.chain", "def get_chain(self):\n return self.segment.chain", "def sequence(self):\n return self.unpack_dword(0xC)", "def get_chain1(self):\n return self.atom1.fragment.chain", "def get_cigar(self):\n return self._cigar", "def parent_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"parent_id\")", "def annulus_ident(self) -> int:\n return self._ann_ident", "def rule_number(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"rule_number\")", "def nr_codes(self):\n if self.is_predict_only:\n return clib.xlinear_get_int_attr(self.model_chain, \"nr_codes\")\n else:\n return self.model_chain[-1].nr_codes", "def get_next_identifier(self) -> int:\n if self.items:\n return self.items[-1].identifier + 1\n else:\n return 1", "def count_nucleic_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_nucleic_acids()\n return n", "def get_serial_number(self):\n\n\t\treturn struct.unpack('<Q', self.boot_sector_data[72 : 80])[0]", "def serial_number(self) -> str:\n return self._serial_number", "def getSerialNumber(self):\n return self.getParameter(index=0)", "def get_serial_number(self):\n serial = create_string_buffer(64)\n self._dll.ShamrockGetSerialNumber(self._device, serial)\n return serial.value", "def rn(self):\n return self._rn", "def sequence(self) -> Any:\n return self.__seq", "def get_accession_num(seq_record):\n accession_atoms = seq_record.id.split(\"|\")\n gb_name = accession_atoms[3]\n # strip the version info before returning\n return gb_name[:-2]", "def _get_serial(self):\n with open(self.ca_dir + SERIAL_NAME, 'r+') as serial_file:\n fcntl.flock(serial_file, fcntl.LOCK_EX)\n serial = int(serial_file.read())\n serial_file.seek(0)\n serial_file.truncate()\n serial_file.writelines(['%d'% (serial + 1)])\n return serial", "def new_sequence_number(self):\n retval = self.sender_sequence_number\n if retval >= MAX_SEQNO:\n raise ContextUnavailable(\"Sequence number too large, context is exhausted.\")\n self.sender_sequence_number += 1\n self.post_seqnoincrease()\n return retval", "def accelerator_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"accelerator_id\")", "def accelerator_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"accelerator_id\")", "def pdu_sequence(self) -> PDUs:\n return self.__pdu_sequence", "def get_residue(self, resnum, chain):\n res_chain = self.get_chain(chain)\n residue = []\n for line in res_chain.split(\"\\n\"):\n if str(resnum) == str(line[22:26].strip()):\n residue.append(line)\n return \"\\n\".join(residue)", "def fan_mode_sequence(self) -> int | None:\n return self.cluster.get(\"fan_mode_sequence\")", "def atomic_number(self):\n return atomic_number(self.sym)", "def get_read_number(self):\n\t\tnode = self.find_read_number_block()\n\t\tif node:\n\t\t\ttry:\n\t\t\t\treturn int(node.attrs['read_number'])\n\t\t\texcept:\n\t\t\t\treturn None\n\t\treturn None", "def get_order_number(self):\n return self.__order_number", "def get_sequence(self, shard: dict) -> str:\n shard_id = shard['ShardId']\n sequence_number = self._sequences.get(shard_id)\n if not sequence_number:\n sequence_number = shard['SequenceNumberRange']['StartingSequenceNumber']\n self._sequences[shard_id] = sequence_number\n return sequence_number", "def getSequencefromPDB(pdbfile, chain='C', index=0):\n parser = PDB.PDBParser(QUIET=True)\n struct = parser.get_structure(pdbfile,pdbfile)\n ppb = PDB.PPBuilder()\n model = struct[0]\n peptides = ppb.build_peptides(model[chain])\n seq=''\n for i,pep in enumerate(peptides):\n 
seq+=str(pep.get_sequence())\n return seq", "def number(self) -> int:\n return self._id", "def _seqno(self):\n self._last_seqno += 1\n return struct.pack(\">L\", self._last_seqno)", "def acl_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"acl_id\")", "def recid(self):\n return self.record[\"control_number\"]", "def _ca_atom(self, residue_id):\n\n residue = self._residue_from_residue_id(residue_id)\n try:\n return residue[\"CA\"]\n except (KeyError, TypeError):\n return None" ]
[ "0.7083817", "0.6912228", "0.63440174", "0.6332537", "0.62717706", "0.62422633", "0.6234725", "0.6123495", "0.61229354", "0.6113672", "0.6113672", "0.60696846", "0.6031649", "0.6021344", "0.6019921", "0.5965752", "0.59603554", "0.59326804", "0.59226", "0.5900438", "0.58660275", "0.5848535", "0.5783916", "0.57827073", "0.5750906", "0.5727806", "0.57013667", "0.5695262", "0.56811553", "0.5637944", "0.5635776", "0.56218696", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5620965", "0.5612759", "0.5603215", "0.5561395", "0.55313605", "0.5526303", "0.55257976", "0.5525424", "0.5447071", "0.54247135", "0.5419086", "0.54090935", "0.54055506", "0.5399006", "0.53594875", "0.5355578", "0.533801", "0.533801", "0.533399", "0.5328925", "0.5326498", "0.5312088", "0.53029054", "0.5298494", "0.5288609", "0.5284519", "0.52821213", "0.52788407", "0.52752465", "0.52634823", "0.5251707", "0.52444404", "0.52439237", "0.5240001", "0.52397764", "0.52298313", "0.5216641", "0.5216641", "0.52123356", "0.5203753", "0.5201577", "0.5195961", "0.5190851", "0.5183792", "0.51766884", "0.5174454", "0.516713", "0.5155457", "0.5154639", "0.51499254", "0.5141167" ]
0.80430615
0
Gets the euclid distance from this atom to the given atom. Returns
def get_euclid_distance_to(self, atom): return linalg.norm(self.get_coords() - atom.get_coords())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_distance(self) -> int:\n return self.get_measurement_data().distance", "def distance(cls, atom_1, atom_2):\n\t\t\n\t\treturn np.linalg.norm((atom_1-atom_2).atom_loc)", "def __get_distance(self, game_object):\n obj_x, obj_y = game_object.get_coordinates()\n self_x, self_y = self._coordinates\n\n inner = (obj_x-self_x)**2 + (obj_y-self_y)**2\n return math.sqrt(inner)", "def get_distance(self, node):\n return np.sqrt(\n (self.x - node.x) ** 2 +\n (self.y - node.y) ** 2\n )", "def distance(self):\n return self._distance", "async def distance(self):\n return round(await self._rpc.distance(), 2)", "def _euclidian_distance(self, x1, x2):\n a= x1-x2\n a2 = a**2\n b = np.sum(a2, axis=1)\n c = np.sqrt(b)\n return c", "def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre", "def dist(self, other: Coordinate) -> int:\n return abs(other.x - self.x) + abs(other.y - self.y)", "def distance(self):\n return Distance(length_of(self.position.au))", "def distance(self, x: int, y: int) -> float:\n return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def get_distance(self, star):\n if self == star:\n return 0\n\n a_car = self.get_cartesian_coords()\n b_car = star.get_cartesian_coords()\n dab = math.degrees(math.acos(a_car[0] * b_car[0] +\n a_car[1] * b_car[1] +\n a_car[2] * b_car[2]))\n return dab", "def distance(self):\n return self.value * len(self.alignment.query)", "def distance_to(self, x):\n return np.linalg.norm(np.array(x) - self.closest_point_to(x))", "def distance_to(self, circle):\n diff = tuple(map(sub, self.pos, circle.pos))\n return math.hypot(*diff)", "def _euclid_distance(self, A, B, axis=1):\n return np.linalg.norm(A - B, axis=axis)", "def distance_to(self, x, y):\n\t\tdx = x - self.x\n\t\tdy = y - self.y\n\t\treturn math.sqrt((dx**2)+(dy**2))", "def get_distance(self, star):\n if self.centroid == star.centroid:\n print(\"distance for same star\")\n return 0\n\n unitary_a = self.get_unitary_vector()\n unitary_b = star.get_unitary_vector()\n dab = math.degrees(math.acos(unitary_a[0] * unitary_b[0] +\n unitary_a[1] * unitary_b[1] +\n unitary_a[2] * unitary_b[2]))\n return dab", "def getDistance(self):\n return sqrt(self.state[0] * self.state[0] + self.state[2] * self.state[2])", "def euclidean_distance(self,):\n return sqrt(pow((self.pose1.x - self.pose2.x), 2) +\n pow((self.pose1.y - self.pose2.y), 2))", "def __compute_distance(self, x, centroid):\n \n diff = x - centroid\n return np.sqrt(np.dot(diff.T, diff))", "def getElectricalDistance(self, neighborID):\n\n if not neighborID in self.Neighbors: # neighborID is not a neighbor\n return -1\n\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] == neighborID:\n break;\n\n return self.ElectricalDistanceToNeighbors[n]", "def distance_from_origin(self) -> float:\n return self._distance_from_origin", "def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d", "def getDistance(self):\n taBox = (self.thor * self.tvert)/(720*960) #box area as percentage of whole\n if(taBox==None or taBox<=0): return -1\n const = 4 * math.tan(0.471)*math.tan(0.3576)\n return math.sqrt((self.abox)/(const*taBox))", "def distorted_distance(self):\n return self._distance", "def euclidean_distance(self, other_point):\n return sqrt((self.x - other_point.x)**2 + (self.y - other_point.y)**2)", "def get_distance(start, end):\n\n\t\tloc_start, loc_end, dst_node = create_distance(start, end)\n\t\tdistance = 
cmds.getAttr(\"%s.distance\" % dst_node)\n\n\t\tcmds.delete([loc_start, loc_end, dst_node])\n\n\t\treturn distance", "def euclidDist(x1, y1, x2, y2):\n c = math.sqrt(((x2-x1)**2) + ((y2-y1)**2))\n\n return c", "def distance_factor(self):\n return self._distancefactor", "def distance(self) -> int:\n return 0", "def get_distance(self, point, cpoint):\n distance = 0.0\n for m, s in zip(point, cpoint):\n distance += pow(m - s, 2)\n distance = math.sqrt(distance)\n return distance", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) **0.5", "def calculate_euclidean_dist(self):\n x_dist = self._current_loc.get_column() - self._goal_loc.get_column()\n y_dist = self._current_loc.get_row() - self._goal_loc.get_row()\n # Note ** is power operator in Python\n return self._current_cost + sqrt(x_dist**2 + y_dist**2)", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_to(self, other):\n if type(other) == GeoPoint:\n other = other.to_cartesian()\n d0 = self.x - other.x\n d1 = self.y - other.y\n d2 = self.z - other.z\n\n return math.sqrt(d0 * d0 + d1 * d1 + d2 * d2)", "def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx ** 2 + dy ** 2)", "def euclidean_distance(self, other_point):\n\n return math.sqrt(math.pow(other_point.x - self.x, 2) + math.pow(other_point.y - self.y, 2))", "def _computeDistance(self, mote, neighbor):\n\n return 1000*math.sqrt((mote.x - neighbor.x)**2 +\n (mote.y - neighbor.y)**2)", "def _computeDistance(self, mote, neighbor):\n\n return 1000*math.sqrt((mote.x - neighbor.x)**2 +\n (mote.y - neighbor.y)**2)", "def distance_to_origin(self):\n return np.sqrt(self.x ** 2 + self.y ** 2)", "def distance_to(self, other_particle):\n return sqrt((self.pos_x - other_particle.pos_x) ** 2 + (\n self.pos_y - other_particle.pos_y) ** 2)", "def distance_from_center(self, x: int, y: int) -> float:\n width, height = self.width, self.height\n dis = distance(x, y, width/2, height/2)\n return dis", "def Cal_Dist(self):\n sum_euclidean_dist = 0\n last_point = None\n for index, this_point in enumerate(self.__traectory_list):\n if last_point is not None:\n sum_euclidean_dist = ((last_point[0]-this_point[0])**2+(last_point[0]-this_point[1])**2)**0.5\n # Debug: Show cumulative geodetic distance\n # Checked with the beginning and the last one\n #print sum_geodetic_dist\n last_point = this_point\n return sum_euclidean_dist", "def compute_distance(self):\n loc = np.extend_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n pos_diff = m-m.transpose(0, 2, 1)\n self.distance = np.linalg.norm(pos_diff, axis=0)", "def distance(self, to: \"Position\") -> int:\n return abs(self.x - to.x) + abs(self.y - to.y)", "def _calculate_distance(self):\n xy = list(zip(self.x, self.y))\n\n dist = [0]\n for i in range(1, len(xy)):\n dist.append(self.distance_between_two_points(xy[i-1], xy[i]))\n\n return np.array(dist).cumsum()", "def distance(self,case):\r\n return 
max(abs(self.ligne-case.ligne),abs(self.colonne-case.colonne))", "def distance(self):\n _, _, costs = self.calculate_costs()\n return np.sum(costs)", "def distmpc(self):\n return self._distance.to(\"Mpc\").value", "def _get_distance_diff(self, input):\n nbatch = input.shape[0]\n in1 = input.unsqueeze(1).expand(\n nbatch, self.nelec, self.nelec, self.ndim)\n in2 = input.unsqueeze(2).expand(\n nbatch, self.nelec, self.nelec, self.ndim)\n dist = torch.pow(in1 - in2, 2).sum(3)\n return dist", "def distance(self, other):\n xd, yd = self.x-other.x, self.y-other.y\n return math.sqrt(xd**2 + yd**2)", "def Comoving_Distance(self, z):\n return self.cd_interp(z)", "def EuclideanDistance( self, a, b ):\n return sqrt( self.EuclideanDistanceSq(a,b) )", "def get_euclidean_distance(self, x_coord_1, x_coord_2, y_coord_1, y_coord_2):\r\n\r\n return math.sqrt(((x_coord_1 - x_coord_2) ** 2) + \\\r\n ((y_coord_1 - y_coord_2) ** 2))", "def calcDistance(self, left, right):\n\n return math.fabs(right-left)", "def euclidDist(pair1,pair2):\n return ((pair1[0]-pair2[0])**2+(pair1[1]-pair2[1])**2)**0.5", "def euclidean_distance(self, point):\n mean = self.mean()\n dist = euclidean(mean, point)\n radius = self.radius * self.distance_factor()\n if radius == 0.0:\n # corner case: the ball consists of a single point only\n # distance is defined as > 1 for flat dimensions unless point lies inside\n if point == mean:\n dist = 0.0\n else:\n dist += 1\n else:\n # normalization so that result 1.0 corresponds to dist == radius (i.e., point is on the border)\n dist /= radius\n return dist", "def distance_to(self, other):\n x0,y0 = self.x, self.y\n x1,y1 = other.x, other.y\n dist = math.sqrt((x1-x0) ** 2 + (y1-y0) ** 2)\n return int(dist)", "def get_distance(self, coords):\n return distance.distance(coords, (self.lat, self.long)).m", "def get_distance(self, other):\n return math.sqrt((self.x - other[0])**2 + (self.y - other[1])**2)", "def dist(self, other):\n return math.sqrt((self.x - other.x)**2 +\n (self.y - other.y)**2 +\n (self.z - other.z)**2)", "def distance(self, position):\n s, r = self.local_coordinates(position)\n return abs(r) + max(s - self.length, 0) + max(0 - s, 0)", "def distance(self, pt):\n return math.sqrt((self.x - pt.x) ** 2 + (self.y - pt.y) ** 2)", "def distance_to(self, n):\n\n d = ( (self.x - n.x) ** 2 + (self.y - n.y) ** 2 + (self.z - n.z) ** 2 ) ** 0.5\n \n return d", "def distance(self, other):\n dx = self.x - other.x\n dy = self.y - other.y\n return math.sqrt(dx*dx + dy*dy)", "def dist(self):\n return self._dist", "def get_euclid_dist(vec_1, vec_2):\n\n\treturn np.sqrt(np.sum(np.fabs(vec_1 - vec_2), axis=1)).flatten()", "def get_distance(self):\n values = self.speakers.values()\n values.sort(reverse=True)\n try:\n return abs(values[1]) - abs(values[0])\n except (IndexError, ValueError):\n return -1", "def getEuclideanDistance():\r\n global euclideanDistance\r\n return euclideanDistance", "def distance(self, x):\n return _elas.SwigPyIterator_distance(self, x)", "def distancia(self, other):\n return ((self.x-other.x)**2 + (self.y-other.y)**2 + (self.z-other.z)**2) ** (1 / 2)", "def euclid_dist(location1, location2):\n return np.sqrt((location1[0] - location2[0]) ** 2 + (location1[1] - location2[1]) ** 2)", "def euclidean_distance(self, point: List[int]) -> float:\n return sqrt(point[0] ** 2 + point[1] ** 2)", "def euclidian_distance(x1, y1, x2, y2):\n distance = sqrt(pow((x1-x2), 2)+(pow((y1-y2), 2)))\n return distance", "def distance_to(self, obj):\n\t\tx, y = self.position\n\t\tobj_x, obj_y = 
obj.position\n\t\treturn hypot(x - obj_x, y - obj_y)", "def distanceFromOrigin(self):\n return ((self.x)**2+(self.y)**2)**0.5", "def get_diameter(self):\n\n if self.no_dist is False:\n dist = self.distance\n diam = dist * self.ang_size / 60. * np.pi/180. * ct._kpc_over_pc_\n self.diam = diam\n else:\n self.diam = -1 # use -1 to indicate unknown diameter\n\n return self.diam", "def get_distance(self, point):\n if not isinstance(point, Point):\n point = Point(*point)\n\n distances = [(point.distance_to_point(p), p) for p in self.points]\n sortpoints = sorted(distances, key=lambda x: x[0])\n closest = sortpoints[0][1]\n\n vc = Vector(*closest)\n d1 = vc.dot(vc)\n\n secondc = sortpoints[1][1]\n vs = Vector(*secondc)\n v1 = Vector(*point) - (vc+vs)/2\n v2 = vs-vc\n v2.unitize()\n d2 = v1.dot(v2)\n\n return abs(min(d1, d2)) - self.thickness/2", "def distance(self, point):\r\n assert a6checks.is_point(point)\r\n assert len(point)==len(self._centroid)\r\n\r\n sum=0\r\n for i in range (len(self._centroid)):\r\n sum+=(point[i]-self._centroid[i])*(point[i]-self._centroid[i])\r\n dist=math.sqrt(sum)\r\n return dist", "def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))", "def distance(self, lat: float, lon: float) -> float:\n return distance((self.lat, self.lon), (lat, lon))", "def calculate_distance(atom1,atom2): #dot string to show when you go into the help doc of this function\n x_distance = atom1[0]-atom2[0]\n y_distance = atom1[1]-atom2[1]\n z_distance = atom1[2]-atom2[2]\n distance = numpy.sqrt(x_distance**2+ y_distance**2+z_distance**2)\n return distance", "def calculate_euclidean_distance(self, matrix, input, output_neuron):\n result = 0\n\n # Loop over all input data.\n diff = input - matrix[output_neuron]\n return np.sqrt(sum(diff*diff))", "def euclid_dist(vec, mat):\n return np.linalg.norm(mat - vec, axis=1)", "def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)", "def distance_to_location(self, row, col):\n return float(sqrt(pow(self._row - row, 2) + pow(self._col - col, 2)))", "def distmeter(self):\n return self._distance.to(\"m\").value", "def distanceTo(self, point):\n return np.linalg.norm([self.x - point.x, self.y - point.y, self.z - point.z])", "def distance(self, other: PointOrIterable = None) -> float:\n return (self.distance_squared(other or Point())) ** 0.5", "def distance(self, other):\n return float(abs(self.x - other.x) + abs(self.y - other.y))", "def distance(self,other):\n return math.sqrt((self.x - other.x)**2 +(self.y - other.y)**2)", "def dist_eccen(self, e):\r\n\r\n return self.uniform(e, self.erange)", "def distance(self, coord1, coord2):\n return (abs(coord1.x - coord2.x) + abs(coord1.y - coord2.y) + abs(coord1.z - coord2.z))//2" ]
[ "0.66581625", "0.6649904", "0.6637657", "0.6630217", "0.6601542", "0.6598605", "0.6596507", "0.63966656", "0.636073", "0.63055176", "0.62724113", "0.62593937", "0.6219049", "0.62136334", "0.62126756", "0.6210324", "0.61876845", "0.61710095", "0.616928", "0.61606264", "0.6159046", "0.6155477", "0.61517185", "0.61355484", "0.6128655", "0.6096839", "0.60678494", "0.6027739", "0.60135216", "0.60121363", "0.5995232", "0.5982541", "0.5980159", "0.5971091", "0.5967924", "0.5967924", "0.5967924", "0.5967924", "0.5967924", "0.5967924", "0.5967924", "0.5957663", "0.5953044", "0.59502697", "0.5941421", "0.5941421", "0.59264266", "0.5925186", "0.5921113", "0.5920276", "0.5915555", "0.5901157", "0.5900112", "0.58870876", "0.5872179", "0.5871164", "0.58640355", "0.58598995", "0.5842618", "0.5842421", "0.58335865", "0.58306754", "0.58273005", "0.58253884", "0.5820019", "0.5815305", "0.5810958", "0.5801486", "0.57921994", "0.57903117", "0.5786603", "0.5785978", "0.57852244", "0.5784011", "0.57777977", "0.5774", "0.5773419", "0.5768458", "0.5766859", "0.5766221", "0.576313", "0.5754473", "0.5747878", "0.57421863", "0.5735089", "0.57158196", "0.57043356", "0.5695689", "0.56881356", "0.56836313", "0.56722325", "0.5659571", "0.5658954", "0.56572074", "0.5651902", "0.5649877", "0.56472886", "0.5645776", "0.5644265", "0.5639622" ]
0.8762926
0
Create a PLaSM cuboid with a color and put it on this atom's coords.
def plasm_cube(self, size=0.1, color=WHITE): return COLOR(color)(T([1,2,3])(self.coords)(CUBOID([size, size, size])))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_cube(color=COLOR_WHITE):\n a = Point3(-1.0, -1.0, -1.0)\n b = Point3(1.0, -1.0, -1.0)\n c = Point3(1.0, -1.0, 1.0)\n d = Point3(-1.0, -1.0, 1.0)\n e = Point3(-1.0, 1.0, -1.0)\n f = Point3(1.0, 1.0, -1.0)\n g = Point3(1.0, 1.0, 1.0)\n h = Point3(-1.0, 1.0, 1.0)\n\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glBegin(GL_QUADS)\n glColor4fv(color)\n drawVertexListCreateNormal([a, b, c, d])\n drawVertexListCreateNormal([b, f, g, c])\n drawVertexListCreateNormal([f, e, h, g])\n drawVertexListCreateNormal([e, a, d, h])\n drawVertexListCreateNormal([d, c, g, h])\n drawVertexListCreateNormal([a, e, f, b])\n glEnd()\n glPopMatrix()\n glEndList()\n return obj", "def create_cube_solid(color=COLOR_WHITE):\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glColor4fv(color)\n try:\n glutSolidCube(1.0)\n except:\n if not _ERRS[3]:\n printGLError(\n \"la version actual de OpenGL no posee la funcion glutSolidCube\")\n _ERRS[3] = True\n glPopMatrix()\n glEndList()\n return obj", "def cuboid(geometry,\n network,\n propname,\n **params):\n print('cuboid: nothing yet')", "def generaCubo(self):\r\n #Use Panda predefined format for vertex coordinate only\r\n format = GeomVertexFormat.getV3()\r\n \r\n #Build Vertex data using the created format. Vertex will never change so I use Static attribute \r\n vdata = GeomVertexData('CuboData', format, Geom.UHStatic)\r\n \r\n #I will have to write vertex data so I create a writer for these data\r\n vertex = GeomVertexWriter(vdata, 'vertex')\r\n \r\n #I now use the writer to add vertex data\r\n vertex.addData3f(0, 0, 0)\r\n vertex.addData3f(1, 1, 1)\r\n vertex.addData3f(0, 1, 1)\r\n vertex.addData3f(0, 1, 0)\r\n vertex.addData3f(0, 0, 1)\r\n vertex.addData3f(1, 0, 0)\r\n vertex.addData3f(1, 0, 1)\r\n vertex.addData3f(1, 1, 0)\r\n \r\n #I now create 12 triangles\r\n prim = GeomTriangles(Geom.UHStatic)\r\n\r\n #and then I add vertex to them\r\n #Next time use addVertices(0,1,2) !!!\r\n prim.addVertex(7)\r\n prim.addVertex(0)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(3)\r\n prim.addVertex(0)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(2)\r\n prim.addVertex(6)\r\n prim.addVertex(4)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(6)\r\n prim.addVertex(2)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(7)\r\n prim.addVertex(2)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(2)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(3)\r\n prim.addVertex(4)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(2)\r\n prim.addVertex(4)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(0)\r\n prim.addVertex(6)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(4)\r\n prim.addVertex(6)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(5)\r\n prim.addVertex(1)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(6)\r\n prim.addVertex(1)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n #Create a Geom to bing vertex data to primitives\r\n geom = Geom(vdata)\r\n geom.addPrimitive(prim)\r\n\r\n #Create a node for the Geom in order to be able to render it\r\n node = GeomNode('gnode')\r\n node.addGeom(geom)\r\n\r\n #Adde the node to the scene graph == render it!\r\n nodePath = render.attachNewNode(node)\r\n \r\n #is this needed?\r\n nodePath.setPos( 0, 5, 0)\r\n \r\n 
self.camera.lookAt(nodePath)\r\n \r\n base.setBackgroundColor( .0, .0, .0 )\r\n \r\n taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")", "def __init__(self, color):\n self.id = Piece.PIECE_ID\n self.crowned = False\n self.color = color\n\n Piece.PIECE_ID += 1", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Gu'", "def __init__(self):\n self.size = 16\n self.color = COLOR\n self.pos = self.spawn()", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Ca'", "def display(self, color = (190,205,205), add = False): \r\n s += pgl.Shape(pgl.FaceSet( [[0,0,0],[1,0,0],[1,1,0],[0,1,0]], [[0,1,2,3]]) , pgl.Material((0,100,0)))", "def __drawCube(self):\n self.cubePos = [[[(160, 160), (200, 160), (240, 160)],\n [(160, 200), (200, 200), (240, 200)],\n [(160, 240), (200, 240), (240, 240)]],\n [[(400, 160), (440, 160), (480, 160)],\n [(400, 200), (440, 200), (480, 200)],\n [(400, 240), (440, 240), (480, 240)]],\n [[(280, 160), (320, 160), (360, 160)],\n [(280, 200), (320, 200), (360, 200)],\n [(280, 240), (320, 240), (360, 240)]],\n [[(40, 160), (80, 160), (120, 160)],\n [(40, 200), (80, 200), (120, 200)],\n [(40, 240), (80, 240), (120, 240)]],\n [[(160, 40), (200, 40), (240, 40)],\n [(160, 80), (200, 80), (240, 80)],\n [(160, 120), (200, 120), (240, 120)]],\n [[(160, 280), (200, 280), (240, 280)],\n [(160, 320), (200, 320), (240, 320)],\n [(160, 360), (200, 360), (240, 360)]]]\n self.cubeColor = {1: 'green', 2: 'blue', 3: 'red', 4: 'orange',\\\n 5: 'white', 6: 'yellow'}\n for x in range(6):\n for y in range(3):\n for z in range(3):\n pos = self.cubePos[x][y][z]\n color = self.cubeColor[self.cube.cube[x][y][z]]\n self.cv.create_rectangle(pos[0], pos[1], pos[0]+40, pos[1]+40,\n fill=color, width='2')", "def __init__(self, color, location):\n\n self._color = color\n self._piece_type = None\n self._location = location", "def create_cube():\n new_cube = RubicsCube2x2()\n show_cube_console(new_cube)\n\n seed = [10, 9, 17, 14, 11, 8, 3, 2, 17, 3, 9, 7, 15, 4, 14, 14, 3, 3, \\\n 13, 7, 15, 9, 14, 13, 11, 17, 7, 10, 5, 16, 11, 5, 7, 10, 14, \\\n 7, 17, 7, 8, 6, 12, 3, 6, 1, 16, 12, 5, 13, 3, 4]\n for move in seed:\n new_cube.do_move(move)\n return new_cube", "def __init__(self, color):\n self._color = color # Color redefined when placed\n # Palace coords\n self._d = ['d1','d2','d3','d8','d9','d10'] \n self._e = ['e1','e2','e3','e8','e9','e10']\n self._f = ['f1','f2','f3','f8','f9','f10']\n self._special = self._d + self._f + self._e\n self._corners = ['d1','f1','e2','d3','f3','d8','d10','f8','f10','e9']", "def __init__(self, nickname, position, direction, color, object_hash = None):\n GameObject.__init__(\n self,\n nickname,\n position,\n direction,\n color = color,\n remote_object = True,\n object_hash = object_hash\n )", "def make_cube(r, g, b):\n ny, nx = r.shape\n R = np.zeros([ny, nx, 3])\n R[:,:,0] = r\n G = np.zeros_like(R)\n G[:,:,1] = g\n B = np.zeros_like(R)\n B[:,:,2] = b\n\n RGB = R + G + B\n\n return R, G, B, RGB", "def make_box(self, scale, color):\n box = Box()\n box.set_color(c=color)\n box.set_location(0, 0, 0)\n box.set_size(scale, scale, scale)\n return box", "def make_box(self, scale, color):\n box = Box()\n box.set_color(c=color)\n box.set_location(0, 0, 0)\n box.set_size(scale, scale, scale)\n return box", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'So'", "def __init__( self, seed=(1, 0, 0) ):\n x, y, z = seed\n self._coords = matrix( [[x], 
[y], [z], [1.]], 'd' )", "def make_soma(self, size, location):\n bpy.ops.mesh.primitive_uv_sphere_add(segments=8, ring_count=8, size=size, location=location)\n # Name object as cell\n bpy.context.object.name = self.id\n # Save referrence\n self.blender_obj = bpy.context.object", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'El'", "def paint_square(self, pos, color, cr):\n cr.set_source_rgb(*color)\n i, j = pos\n cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)\n cr.fill()", "def __init__(self, x, y, x2, y2, x3, y3, color=(255, 255, 255, 255),\n batch=None, group=None):\n self._x = x\n self._y = y\n self._x2 = x2\n self._y2 = y2\n self._x3 = x3\n self._y3 = y3\n self._rotation = 0\n self._num_verts = 3\n\n r, g, b, *a = color\n self._rgba = r, g, b, a[0] if a else 255\n\n program = get_default_shader()\n self._batch = batch or Batch()\n self._group = self.group_class(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, program, group)\n\n self._create_vertex_list()\n self._update_vertices()", "def place_cube(self,\n cube_xy,\n player=None,\n weight=1,\n azimuth=None,\n return_azimuth=False):\n\n self.color_idx += 1\n if self.color_idx == len(self.colors):\n self.color_idx = 0\n if azimuth is None:\n azimuth = np.random.randint(0, 180)\n else:\n assert azimuth >= 0 and azimuth <= 180\n cube_rot = self.p0.getQuaternionFromEuler([\n 0, 0, np.deg2rad(azimuth)\n ]) # rotated around which axis? # np.deg2rad(90)\n\n alpha = 1 # this could be set to .5 for some transparency\n\n if weight == 1:\n if player is None or self.four_colors:\n color = self.colors[self.color_idx] + [alpha]\n elif player == Player.Player:\n color = [0, 0, 1, 1]\n if DEBUG:\n print(\"Player putting down cube at\", cube_xy)\n elif player == Player.Enemy:\n color = [1, 0, 0, 1]\n if DEBUG:\n print(\"Opponent putting down cube at\", cube_xy)\n elif player == Player.Starter:\n color = [0, 0, 0, 1]\n if self.dark:\n color = [1, 1, 1, 1]\n if DEBUG:\n print(\"Starter cube at\", cube_xy)\n else:\n color = WEIGHT_COLORS[weight]\n\n max_z = self.find_highest_z(cube_xy, azimuth)\n\n cube_pos = [cube_xy[0], cube_xy[1], max_z + 1.0001]\n # print (\"placing cube at\",cube_pos)\n\n cube_visual = self.p0.createVisualShape(\n shapeType=self.p0.GEOM_BOX,\n rgbaColor=color,\n halfExtents=[1, 1, 1]\n # specularColor=[0.4, .4, 0],\n )\n\n cube = self.p0.createMultiBody(\n baseMass=weight,\n # baseInertialFramePosition=[0, 0, 0],\n baseCollisionShapeIndex=self.cube_collision,\n baseVisualShapeIndex=cube_visual,\n basePosition=cube_pos,\n baseOrientation=cube_rot,\n useMaximalCoordinates=True)\n\n self.cubes.append(cube)\n\n if max_z > self.current_max_z:\n self.current_max_z = np.around(max_z)\n out = True\n else:\n out = False\n\n if not return_azimuth:\n return out\n else:\n return out, azimuth", "def put_object(self, surface, p, color):\n coords = self.transform_coordinates(p)\n if not self.in_display(coords):\n return\n pygame.draw.circle(surface,\n color,\n coords,\n int(p.radius / SCALE_FACTOR))", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Ge'\n self._in_check = False", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Ho'", "def cube_colors(self, cubes):\n n = cubes.shape[0]\n col = np.zeros((n ** 3, 3))\n terrain_col = (66, 244, 72)\n empty_col = self.background\n for i in range(n):\n for j in range(n):\n for k in range(n):\n c = cubes[i, j, k]\n col[i * n ** 2 + j * n + k] = empty_col 
if c.state == 'empty' else terrain_col\n self.wireframe_col = col", "def cube(self):\n\n dims = self.voxels.shape\n max_dim = max(dims)\n \n x_target = (max_dim - dims[0]) / 2\n y_target = (max_dim - dims[1]) / 2\n z_target = (max_dim - dims[2]) / 2\n\n self.voxels = np.pad(self.voxels,\n ((int(np.ceil(x_target)), int(np.floor(x_target))),\n (int(np.ceil(y_target)), int(np.floor(y_target))),\n (int(np.ceil(z_target)), int(np.floor(z_target)))),\n 'constant',\n constant_values=(0))\n\n self.point_position = self.point_position + [np.ceil(z_target),\n np.ceil(y_target),\n np.ceil(x_target)]\n\n return(self)", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Ch'", "def __init__(self, cube_size, time_range):\n\n # cubesize is in z,y,x for interactions with tile/image data\n self.zdim, self.ydim, self.xdim = self.cubesize = [cube_size[2], cube_size[1], cube_size[0]]\n self.time_range = time_range\n self._newcube = False", "def draw_cuboid(self, x_pos, z_pos, half_width, half_depth, height):\n GL.glBegin(GL.GL_QUADS)\n GL.glNormal3f(0, -1, 0)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glNormal3f(0, 1, 0)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glNormal3f(-1, 0, 0)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glNormal3f(1, 0, 0)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glNormal3f(0, 0, -1)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glNormal3f(0, 0, 1)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glEnd()", "def create_cube(scale=(1.0,1.0,1.0), st=False, rgba=False, dtype='float32', type='triangles'):\n\n shape = [24, 3]\n rgba_offset = 3\n\n width, height, depth = scale\n # half the dimensions\n width /= 2.0\n height /= 2.0\n depth /= 2.0\n\n vertices = np.array([\n # front\n # top right\n ( width, height, depth,),\n # top left\n (-width, height, depth,),\n # bottom left\n (-width,-height, depth,),\n # bottom right\n ( width,-height, depth,),\n\n # right\n # top right\n ( width, height,-depth),\n # top left\n ( width, height, depth),\n # bottom left\n ( width,-height, depth),\n # bottom right\n ( width,-height,-depth),\n\n # back\n # top right\n (-width, height,-depth),\n # top left\n ( width, height,-depth),\n # bottom left\n ( width,-height,-depth),\n 
# bottom right\n (-width,-height,-depth),\n\n # left\n # top right\n (-width, height, depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n (-width,-height, depth),\n\n # top\n # top right\n ( width, height,-depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width, height, depth),\n # bottom right\n ( width, height, depth),\n\n # bottom\n # top right\n ( width,-height, depth),\n # top left\n (-width,-height, depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n ( width,-height,-depth),\n ], dtype=dtype)\n\n st_values = None\n rgba_values = None\n\n if st:\n # default st values\n st_values = np.tile(\n np.array([\n (1.0, 1.0,),\n (0.0, 1.0,),\n (0.0, 0.0,),\n (1.0, 0.0,),\n ], dtype=dtype),\n (6,1,)\n )\n\n if isinstance(st, bool):\n pass\n elif isinstance(st, (int, float)):\n st_values *= st\n elif isinstance(st, (list, tuple, np.ndarray)):\n st = np.array(st, dtype=dtype)\n if st.shape == (2,2,):\n # min / max\n st_values *= st[1] - st[0]\n st_values += st[0]\n elif st.shape == (4,2,):\n # per face st values specified manually\n st_values[:] = np.tile(st, (6,1,))\n elif st.shape == (6,2,):\n # st values specified manually\n st_values[:] = st\n else:\n raise ValueError('Invalid shape for st')\n else:\n raise ValueError('Invalid value for st')\n\n shape[-1] += st_values.shape[-1]\n rgba_offset += st_values.shape[-1]\n\n if rgba:\n # default rgba values\n rgba_values = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=dtype), (24,1,))\n\n if isinstance(rgba, bool):\n pass\n elif isinstance(rgba, (int, float)):\n # int / float expands to RGBA with all values == value\n rgba_values *= rgba \n elif isinstance(rgba, (list, tuple, np.ndarray)):\n rgba = np.array(rgba, dtype=dtype)\n\n if rgba.shape == (3,):\n rgba_values = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,):\n rgba_values[:] = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,3,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (4,4,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (6,3,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (6,4,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (24,3,):\n rgba_values = rgba\n elif rgba.shape == (24,4,):\n rgba_values = rgba\n else:\n raise ValueError('Invalid shape for rgba')\n else:\n raise ValueError('Invalid value for rgba')\n\n shape[-1] += rgba_values.shape[-1]\n\n data = np.empty(shape, dtype=dtype)\n data[:,:3] = vertices\n if st_values is not None:\n data[:,3:5] = st_values\n if rgba_values is not None:\n data[:,rgba_offset:] = rgba_values\n\n if type == 'triangles':\n # counter clockwise\n # top right -> top left -> bottom left\n # top right -> bottom left -> bottom right\n indices = np.tile(np.array([0, 1, 2, 0, 2, 3], dtype='int'), (6,1))\n for face in range(6):\n indices[face] += (face * 4)\n indices.shape = (-1,)\n elif type == 'triangle_strip':\n raise NotImplementedError\n elif type == 'triangle_fan':\n raise NotImplementedError\n elif type == 'quads':\n raise NotImplementedError\n elif type == 'quad_strip':\n raise NotImplementedError\n else:\n raise ValueError('Unknown type')\n\n return data, indices", "def draw_cube(self, points, color=(255, 0, 0)):\n\n # draw front\n self.draw_line(points[0], points[1], color)\n self.draw_line(points[1], points[2], color)\n self.draw_line(points[3], points[2], color)\n self.draw_line(points[3], points[0], color)\n\n # draw back\n self.draw_line(points[4], points[5], color)\n self.draw_line(points[6], 
points[5], color)\n self.draw_line(points[6], points[7], color)\n self.draw_line(points[4], points[7], color)\n\n # draw sides\n self.draw_line(points[0], points[4], color)\n self.draw_line(points[7], points[3], color)\n self.draw_line(points[5], points[1], color)\n self.draw_line(points[2], points[6], color)\n\n # draw dots\n self.draw_dot(points[0], point_color=color, point_radius=4)\n self.draw_dot(points[1], point_color=color, point_radius=4)\n\n # draw x on the top\n self.draw_line(points[0], points[5], color)\n self.draw_line(points[1], points[4], color)", "def __init__(self, x, y, width, height, color=(255, 255, 255, 255),\n batch=None, group=None):\n self._x = x\n self._y = y\n self._width = width\n self._height = height\n self._rotation = 0\n self._num_verts = 6\n\n r, g, b, *a = color\n self._rgba = r, g, b, a[0] if a else 255\n\n program = get_default_shader()\n self._batch = batch or Batch()\n self._group = self.group_class(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, program, group)\n\n self._create_vertex_list()\n self._update_vertices()", "def createBox( size=(1,1,1), position=(0,0,0), colour=(0.6,0.6,0.6) ):\r\n \r\n size = PyUtils.toVector3d(size)\r\n position = PyUtils.toPoint3d(position)\r\n vertices = []\r\n delta = MathLib.Vector3d()\r\n for repeat in range(3):\r\n for x in (-0.5,0.5) :\r\n delta.x = size.x * x\r\n for y in (-0.5,0.5) :\r\n delta.y = size.y * y\r\n for z in (-0.5,0.5) :\r\n delta.z = size.z * z\r\n vertices.append( position + delta )\r\n \r\n faces = [(0,1,3,2),(5,4,6,7), # YZ Faces\r\n (9,13,15,11),(12,8,10,14), # XY Faces\r\n (18,19,23,22),(17,16,20,21)] # XZ Faces\r\n \r\n return create( vertices, faces, colour )", "def getCube(unique_name):", "def corridor(x,z, emap, width=10, length=10, height=10, details=None, walls=\"ns\", name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n solid_objects = []\r\n\r\n if \"n\" in walls:\r\n # TODO: abstract out the mostly-duplicate code in these cases...\r\n nwall = SolidObject(name+str(wallnum),\r\n Size(length, height, 1),\r\n Position(x, emap.calcHeight(x, z) + height / 2, n-0.5), 0)\r\n solid_objects.append(nwall)\r\n nwallmodel = createMyCuboid(nwall.w() * 2, nwall.h() * 2, nwall.d() * 2,\r\n name=name+str(wallnum),\r\n x=nwall.x(),y=nwall.y(),z=nwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(nwallmodel)\r\n else:\r\n nwall.setmodel(nwallmodel, details)\r\n\r\n\r\n wallnum += 1\r\n\r\n if \"s\" in walls:\r\n swall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, emap.calcHeight(x, z)+height / 2, s+0.5), 0)\r\n solid_objects.append(swall)\r\n swallmodel = createMyCuboid(swall.w()*2, swall.h()*2, swall.d()*2,\r\n name=name+str(wallnum),\r\n x=swall.x(), y=swall.y(), z=swall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0,cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(swallmodel)\r\n else:\r\n swall.setmodel(swallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if \"e\" in walls:\r\n ewall = SolidObject(name+str(wallnum), Size(1, height, width), Position(e-0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(ewall)\r\n ewallmodel = createMyCuboid(ewall.w()*2, ewall.h()*2, ewall.d()*2,\r\n name=name+str(wallnum),\r\n x=ewall.x(), y=ewall.y(), z=ewall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ewallmodel)\r\n else:\r\n ewall.setmodel(ewallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if 
\"w\" in walls:\r\n wwall = SolidObject(name+str(wallnum), Size(1, height, width), Position(w+0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(wwall)\r\n wwallmodel = createMyCuboid(wwall.w()*2, wwall.h()*2, wwall.d()*2,\r\n name=name+str(wallnum),\r\n x=wwall.x(), y=wwall.y(), z=wwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(wwallmodel)\r\n else:\r\n wwall.setmodel(wwallmodel, details)\r\n wallnum += 1\r\n\r\n if \"o\" not in walls:\r\n ceiling = SolidObject(name+str(wallnum), Size(length, 1, width), Position(x, emap.calcHeight(x, z)+height+0.5, z), 0)\r\n solid_objects.append(ceiling)\r\n ceilingmodel = createMyCuboid(ceiling.w()*2, ceiling.h()*2, ceiling.d()*2,\r\n name=name+str(wallnum),\r\n x=ceiling.x(), y=ceiling.y(), z=ceiling.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ceilingmodel)\r\n else:\r\n ceiling.setmodel(ceilingmodel, details)\r\n\r\n wallnum += 1\r\n\r\n return solid_objects", "def create_icosaedron():\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n try:\n glutSolidIcosahedron()\n except:\n if not _ERRS[8]:\n printGLError(\n \"la version actual de OpenGL no posee la funcion glutSolidIcosahedron\")\n _ERRS[8] = True\n glPopMatrix()\n glEndList()\n return obj", "def show_crisscross(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n obj = [\r\n LINEWIDTH, 3,\r\n\r\n BEGIN, LINE_STRIP,\r\n VERTEX, float(float(self.point_x.get()) - 0.5), float(self.point_y.get()), float(self.point_z.get()),\r\n VERTEX, float(float(self.point_x.get()) + 0.5), float(self.point_y.get()), float(self.point_z.get()),\r\n END,\r\n\r\n BEGIN, LINE_STRIP,\r\n VERTEX, float(self.point_x.get()), float(float(self.point_y.get()) - 0.5), float(self.point_z.get()),\r\n VERTEX, float(self.point_x.get()), float(float(self.point_y.get()) + 0.5), float(self.point_z.get()),\r\n END,\r\n\r\n BEGIN, LINE_STRIP,\r\n VERTEX, float(self.point_x.get()), float(self.point_y.get()), float(float(self.point_z.get()) - 0.5),\r\n VERTEX, float(self.point_x.get()), float(self.point_y.get()), float(float(self.point_z.get()) + 0.5),\r\n END\r\n\r\n ]\r\n\r\n PymolPlugin.PymolPlugin().delete(self.point_name)\r\n view = PymolPlugin.PymolPlugin().get_view()\r\n PymolPlugin.PymolPlugin().load_CGO(obj, self.point_name)\r\n PymolPlugin.PymolPlugin().set_view(view)\r\n\r\n else:\r\n chimera_model_number = int(mole_object.input_structure_box.index('active')) - 1\r\n ChimeraPlugin.ChimeraPlugin().make_icosahedron(str(chimera_model_number), float(self.point_x.get()),\r\n float(self.point_y.get()), float(self.point_z.get()))", "def drawCube( self ):\n glBegin(GL_QUADS);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 
1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def draw_square(self, surface, color, position):\n rect = pygame.Rect(position, (50, 50))\n pygame.draw.rect(surface, color, rect)", "def box(x, y, z):\n global _cmds\n _cmds = (f\"cube({[x,y,z]},\"\n f\"center=false);\\n\\n\") + _cmds", "def draw( self ):\r\n print \"Drawing cuboid!\"\r\n glTranslated( *self.pos3D ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n if self.rotnByOGL:\r\n glRotated( self.thetaDeg , *self.rotAxis )\r\n # glTranslated( 0 , 0 , 0 ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n glColor3ub( *self.color ) # Get the color according to the voxel type\r\n print \"DEBUG:\" , \"Set color to\" , self.color\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_QUADS , # -------------- Draw quadrilaterals\r\n self.indices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n glColor3ub( *self.colorLine )\r\n pyglet.gl.glLineWidth( 3 )\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_LINES , # -------------- Draw quadrilaterals\r\n self.linDices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n print \"DEBUG:\" , \"Indices\"\r\n print self.indices \r\n print \"DEBUG:\" , \"Vertices\"\r\n print self.vertices \r\n \"\"\" URL: http://pyglet.readthedocs.io/en/pyglet-1.2-maintenance/programming_guide/graphics.html#vertex-lists\r\n \r\n There is a significant overhead in using pyglet.graphics.draw and pyglet.graphics.draw_indexed due to pyglet \r\n interpreting and formatting the vertex data for the video device. Usually the data drawn in each frame (of an animation) \r\n is identical or very similar to the previous frame, so this overhead is unnecessarily repeated.\r\n \r\n A VertexList is a list of vertices and their attributes, stored in an efficient manner that’s suitable for direct \r\n upload to the video card. 
On newer video cards (supporting OpenGL 1.5 or later) the data is actually stored in video memory.\r\n \"\"\"\r\n if self.rotnByOGL:\r\n glRotated( -self.thetaDeg , *self.rotAxis )\r\n glTranslated( *np.multiply( self.pos3D , -1 ) ) # Reset the transform coordinates\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n print \"Done drawing!\"", "def _make_test_cube(long_name):\n cs = GeogCS(EARTH_RADIUS)\n data = np.array([[1.0, 1.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.0, 1.0]])\n cube = Cube(data, long_name=long_name)\n x_coord = DimCoord(\n np.linspace(-45.0, 45.0, 3), \"latitude\", units=\"degrees\", coord_system=cs\n )\n y_coord = DimCoord(\n np.linspace(120, 180, 3), \"longitude\", units=\"degrees\", coord_system=cs\n )\n cube.add_dim_coord(x_coord, 0)\n cube.add_dim_coord(y_coord, 1)\n return cube", "def __init__(self, x, y, outer_radius, inner_radius, num_spikes, rotation=0,\n color=(255, 255, 255, 255), batch=None, group=None) -> None:\n self._x = x\n self._y = y\n self._outer_radius = outer_radius\n self._inner_radius = inner_radius\n self._num_spikes = num_spikes\n self._num_verts = num_spikes * 6\n self._rotation = rotation\n\n r, g, b, *a = color\n self._rgba = r, g, b, a[0] if a else 255\n\n program = get_default_shader()\n self._batch = batch or Batch()\n self._group = self.group_class(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, program, group)\n\n self._create_vertex_list()\n self._update_vertices()", "def make_ball(self, scale, color):\n sphere = Sphere()\n sphere.set_location(0, 0, 0)\n sphere.set_size(scale, scale, scale)\n sphere.set_color(c=color)\n return sphere", "def make_ball(self, scale, color):\n sphere = Sphere()\n sphere.set_location(0, 0, 0)\n sphere.set_size(scale, scale, scale)\n sphere.set_color(c=color)\n return sphere", "def push_color(self, color):\n self[color.name] = color\n # for every added new color, set the map as colored\n self.black_and_white = False", "def drawCube(self):\r\n glBegin(GL_QUADS);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glEnd()", "def place_obj(self):\r\n for pos in BOARD_POSITIONS:\r\n self.board[pos[0]][pos[1]] = 
Stone(color=self.state[pos[0]][pos[1]], pos=(pos[0], pos[1]))\r\n self.board[pos[0]][pos[1]].liberty = self.board[pos[0]][pos[1]].compute_liberty(self.state)", "def drawCube( self ):\n glBegin(GL_QUADS);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def put_color(self, _pos, _color):\n assert(((len(_pos) == 2) and (len(_color) == self.__resolution[2])) or\n ((len(_pos) == 3) and (len(_color) == 1)))\n self.__framebuffer[_pos] = _color", "def sphere_cpk(molecule, colormode='discrete'):\n colormode = colormode.lower()\n msp_mapper = OpenGLMoleculeMapper()\n msp_mapper.SetInputData(molecule)\n msp_mapper.SetRenderAtoms(True)\n msp_mapper.SetRenderBonds(False)\n msp_mapper.SetAtomicRadiusTypeToVDWRadius()\n msp_mapper.SetAtomicRadiusScaleFactor(1)\n if colormode == 'discrete':\n msp_mapper.SetAtomColorMode(1)\n elif colormode == 'single':\n msp_mapper.SetAtomColorMode(0)\n else:\n msp_mapper.SetAtomColorMode(1)\n warnings.warn('Incorrect colormode specified! 
Using discrete.')\n\n # To-Do manipulate shading properties to make it look aesthetic\n molecule_actor = Actor()\n molecule_actor.SetMapper(msp_mapper)\n return molecule_actor", "def create_sphere(lat=10, lng=10, color=COLOR_WHITE):\n if lat >= 3 and lng >= 3:\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glColor4fv(color)\n try:\n glutSolidSphere(1.0, lat, lng)\n except:\n if not _ERRS[0]:\n printGLError(\n \"la version actual de OpenGL no posee la funcion glutSolidSphere\")\n _ERRS[0] = True\n glPopMatrix()\n glEndList()\n return obj\n else:\n raise Exception(\n \"La latitud y longitud de la figura deben ser mayores a 3\")", "def set_at(self, pos: Tuple2NumberType, color: ColorInputType) -> 'BaseImage':\n assert_vector(pos, 2)\n self._surface.set_at(pos, assert_color(color))\n return self", "def __init__(self, *coordinates, color=(255, 255, 255, 255), batch=None, group=None):\n\n # len(self._coordinates) = the number of vertices and sides in the shape.\n self._rotation = 0\n self._coordinates = list(coordinates)\n self._num_verts = (len(self._coordinates) - 2) * 3\n\n r, g, b, *a = color\n self._rgba = r, g, b, a[0] if a else 255\n\n program = get_default_shader()\n self._batch = batch or Batch()\n self._group = self.group_class(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, program, group)\n\n self._create_vertex_list()\n self._update_vertices()", "def __init__(self, x, y, color):\n if x < 0:\n raise ValueError('X must be a positive int')\n if y < 0:\n raise ValueError('Y must be a positive int')\n self.top_left_x = x\n self.top_left_y = y\n self.bounding_box_x_len = 0\n self.bounding_box_y_len = 0\n self.num_colored_cells = 1\n self.color = color\n\n # The cells are added via relative position to the top left corner.\n # This way later the groups can be compared without their relative positioning mattering.\n # TODO Must find a way to represent this in such a way that the matrix can be rotated 90 degrees or mirrored\n # over an arbitrary value and for a useful comparison to be made\n self.cells = np.zeros((1, 1), dtype=np.int32)\n self.cells[0][0] = color", "def add_clay_to_selected(self):\n scene = self.set_as_active()\n\n # if the user selected a material, use it\n if w_var.cb_mat_clay:\n clay_mat = bpy.data.materials[w_var.mat_clay_name]\n\n # else, create a new one with the color selected\n else:\n clay_color = w_var.color_clay\n\n # separating rgb and alpha\n clay_color_rgb = clay_color[0:3]\n clay_color_alpha = clay_color[-1]\n clay_mat = bpy.data.materials.new('clay')\n \n renderengine = scene.wirebomb.data_renderengine\n \n if renderengine == 'CYCLES':\n clay_mat.use_nodes = True\n tree = clay_mat.node_tree\n tree.nodes.clear()\n\n # creating the nodes\n node_transparent = tree.nodes.new('ShaderNodeBsdfTransparent')\n node_transparent.location = -300, 100\n\n node_diffuse = tree.nodes.new('ShaderNodeBsdfDiffuse')\n node_diffuse.location = -300, -100\n node_diffuse.inputs[0].default_value = clay_color_rgb + (1.0, )\n node_diffuse.color = clay_color_rgb\n node_diffuse.name = 'addon_clay_color' # referencing to this ID in the real-time change\n\n node_mixshader = tree.nodes.new('ShaderNodeMixShader')\n node_mixshader.location = 0, 50\n node_mixshader.inputs[0].default_value = clay_color_alpha\n node_mixshader.name = 'addon_clay_alpha' # referencing to this ID in the real-time change\n\n node_output = tree.nodes.new('ShaderNodeOutputMaterial')\n node_output.location = 300, 50\n\n # connecting the nodes\n tree.links.new(node_transparent.outputs[0], 
node_mixshader.inputs[1])\n tree.links.new(node_diffuse.outputs[0], node_mixshader.inputs[2])\n tree.links.new(node_mixshader.outputs[0], node_output.inputs[0])\n\n for node in tree.nodes:\n node.select = False\n\n # sets the viewport color\n clay_mat.diffuse_color = clay_color_rgb\n \n elif renderengine == 'BLENDER_RENDER':\n clay_mat.diffuse_color = clay_color_rgb\n clay_mat.use_transparency = True\n clay_mat.alpha = clay_color_alpha\n\n previous_area = bpy.context.area.type\n bpy.context.area.type = 'VIEW_3D'\n previous_layers = tuple(scene.layers)\n\n # can't enter edit mode on objects on inactive layers\n scene.layers = (True,)*20\n\n for obj in scene.objects:\n if obj.select:\n # only enters edit mode on active object\n scene.objects.active = obj\n obj.data.materials.append(clay_mat)\n clay_index = obj.data.materials.find(clay_mat.name)\n obj.active_material_index = clay_index\n\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.object.material_slot_assign()\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.object.mode_set(mode='OBJECT')\n\n bpy.context.area.type = previous_area\n scene.layers = previous_layers\n\n return clay_mat", "def cubify(\n arr: xr.DataArray,\n *spatial_dims: str,\n pixel_dim: Hashable = 'pixel'\n ):\n if not spatial_dims:\n spatial_dims = ('x', 'y')\n cube = arr.set_index({pixel_dim: spatial_dims}).unstack(pixel_dim) # type: ignore[union-attr]\n for d in spatial_dims:\n cube.coords[d].attrs = arr.coords[d].attrs\n return cube", "def cuboctahedron(self):\n one = Integer(1)\n v = [ [0, -one/2, -one/2], [0, one/2, -one/2], [one/2, -one/2, 0], \n [one/2, one/2, 0], [one/2, 0, one/2], [one/2, 0, -one/2], \n [0, one/2, one/2], [0, -one/2, one/2], [-one/2, 0, one/2],\n [-one/2, one/2, 0], [-one/2, 0, -one/2], [-one/2, -one/2, 0] ]\n return Polyhedron(vertices = v)", "def cuboids_with_side_m(m):\n return ((x, y, m) for x in xrange(1, m + 1) for y in xrange(x, m + 1))", "def molmapCube(model_id, resolution):\n\n ####create grid and save as model\n #get gabarities\n min_x, max_x, min_y, max_y, min_z, max_z = getGabarities(model_id)\n #create empty cube map\n min_cor = min(min_x,min_y,min_z)\n max_cor = max(max_x,max_y,max_z)\n d_grid = resolution/3\n\n\n\n #run molmap\n molmap_com = 'molmap #'+str(model_id) + ' ' + str(resolution)+' gridSpacing ' + str(resolution/3.0)\n chimera.runCommand(molmap_com)\n map_orig = active_volume();\n\n # interpolation\n createCubeMapfromGivenMap(map_orig,min_cor, max_cor, d_grid)\n\n #delete the grid\n map_orig.destroy()", "def __init__(self, game, left, right, bottom, top, col=\"black\"):\n\n # Assign given attributes (ensuring order of coordinates)\n self.game = game\n self.canvas = game.canvas # canvas to draw self on\n self._left = min(left, right)\n self._right = max(left, right)\n self._bottom = min(bottom, top)\n self._top = max(bottom, top)\n self.color = col\n\n # Draw the block\n self._draw()", "def mlab_plt_cube(xmin, xmax, ymin, ymax, zmin, zmax):\n faces = cube_faces(xmin, xmax, ymin, ymax, zmin, zmax)\n for grid in faces:\n x, y, z = grid\n mlab.mesh(x, y, z, opacity=0.1, color=(0.1, 0.2, 0.3))", "def test_smallcube_in_bigcube(self):\n w = mt.createCube(marker=1)\n c = mt.createCube(size=[0.5, 1.0, 1.0], marker=2)\n\n w = mt.mergePLC3D([w, c])\n self.assertEqual(w.nodeCount(), 8+8)\n self.assertEqual(w.boundaryCount(), 8)\n\n # will not work until edge intersection is working\n # d = mt.createCube(size=[0.8, 1.0, 1.0],\n # pos=[0.1, 0.0, 1.0],\n # marker=3)\n # w = 
mt.mergePLC3D([w, d])\n # self.assertEqual(w.nodeCount(), 8+8)\n # self.assertEqual(w.boundaryCount(), 8)\n\n # print(w)\n pg.show(w)\n pg.show(mt.createMesh(w))", "def draw_cube(self, window):\n size = pygame.display.get_surface().get_size()\n width = (size[0]/4)\n\n window.fill((000,000,000))\n\n self.draw_face(\"U\", window, (0 + (width*1), 0 + (width*0)), width)\n self.draw_face(\"L\", window, (0 + (width*0), 0 + (width*1)), width)\n self.draw_face(\"F\", window, (0 + (width*1) * 1, 0 + (width*1)), width)\n self.draw_face(\"R\", window, (0 + (width*2), 0 + (width*1)), width)\n self.draw_face(\"B\", window, (0 + (width*3), 0 + (width*1)), width)\n self.draw_face(\"D\", window, (0 + (width*1), 0 + (width*2)), width)\n\n pygame.display.update()", "def __init__(self, x, y, width, height, color):\n self._x = x\n self._y = y\n self._width = width\n self._height = height\n self._color = color", "def marching_cubes(self, spc=0.02):\n\n mb_meshgrid, xyz_spc = self.get_mb_meshgrid(spc)\n\n verts, faces, normals, values = measure.marching_cubes(\n mb_meshgrid,\n level=0.0,\n spacing=xyz_spc,\n gradient_direction='ascent',\n step_size=1)\n\n verts += np.c_[self.xmin, self.ymin, self.zmin]\n\n self.verts = verts\n self.faces = faces\n self.normals = normals\n self.values = values\n self.sa = measure.mesh_surface_area(verts, faces)", "def genCubes():\n offset = vpy.vector(.5, .5, .5)\n size = vpy.vector(.2, .2, .2)\n B1 = vpy.box(pos=vpy.vector(0, 0, 0)-offset,\n color=vpy.vector(0, 0, 0), size=size, make_trail=True)\n B2 = vpy.box(pos=vpy.vector(0, 0, 1)-offset,\n color=vpy.vector(0, 0, 1), size=size, make_trail=True)\n B3 = vpy.box(pos=vpy.vector(0, 1, 1)-offset,\n color=vpy.vector(0, 1, 1), size=size, make_trail=True)\n B4 = vpy.box(pos=vpy.vector(0, 1, 0)-offset,\n color=vpy.vector(0, 1, 0), size=size, make_trail=True)\n\n B5 = vpy.box(pos=vpy.vector(1, 0, 0)-offset,\n color=vpy.vector(1, 0, 0), size=size, make_trail=True)\n B6 = vpy.box(pos=vpy.vector(1, 0, 1)-offset,\n color=vpy.vector(1, 0, 1), size=size, make_trail=True)\n B7 = vpy.box(pos=vpy.vector(1, 1, 0)-offset,\n color=vpy.vector(1, 1, 0), size=size, make_trail=True)\n B8 = vpy.box(pos=vpy.vector(1, 1, 1)-offset,\n color=vpy.vector(1, 1, 1), size=size, make_trail=True)\n\n return [B1, B2, B3, B4, B5, B6, B7, B8]", "def __init__(self, x, y, width, height, color, name):\n self.__class__.instances.append(self)\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.color = color\n self.name = name", "def __init__(self, master, x, y, size, colour):\n self.master = master\n self.abs = x\n self.ord = y\n self.size= size\n self.fill= colour", "def solid(t, coord, ii, n_pixels, random_values):\n\n\n return (100,100,100)", "def __init__(self, field, color):\n Figure.__init__(self, field, color)\n self.pot_moves = {(i, -i) for i in range(-7, 0)} \\\n | {(i, -i) for i in range(1, 8)} \\\n | {(i, i) for i in range(-7, 0)} \\\n | {(i, i) for i in range(1, 8)}\n self.value = 3.5 * self.color_value\n self.short_name = self.color[0] + \"B\"", "def imageCube(imagepath=None,position=(0,0,0),initialorientation='x'):\n\n # first parse the image path into folder and basename\n\n bpy.ops.import_image.to_plane(files=[{'name':imagepath}])\n obj0 = bpy.context.object\n\n obj0.location = position\n obj0.location.x = obj0.location.x - 0.5\n obj1 = copyObject(obj0,newlocation=(1,0,0))\n obj0.rotation_euler = (0,math.pi/2,0)\n obj1.rotation_euler = (0,-math.pi/2,0)\n\n\n obj2 = copyObject(obj0,newlocation=(0.5,-0.5,0))\n obj3 = 
copyObject(obj0,newlocation=(0.5,0.5,0))\n obj2.rotation_euler = (math.pi/2,0,0)\n obj3.rotation_euler = (-math.pi/2,0,0)\n\n\n obj4 = copyObject(obj0,newlocation=(0.5,0,0.5))\n obj5 = copyObject(obj0,newlocation=(0.5,0,-0.5))\n obj4.rotation_euler = (0,0,0)\n obj5.rotation_euler = (0,-math.pi,0)", "def add_cube(self, cube, name, size=None, lbda=None, add_white=False,\n unit_size=u.arcsec, unit_wave=u.angstrom):\n if size is None:\n size = self.default_size\n unit_size = u.arcsec\n\n subcub = cube.subcube(center=(self.dec, self.ra), size=size,\n unit_center=u.deg, unit_size=unit_size)\n\n if add_white:\n self.images['MUSE_WHITE'] = subcub.mean(axis=0)\n\n if lbda is not None:\n subcub = subcub.select_lambda(lbda[0], lbda_max=lbda[1],\n unit_wave=unit_wave)\n\n self.cubes[name] = subcub", "def append_vertex(remote, objectid, position, normal=(0,1,0), color=(1,1,1) ):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_AppendVertex(objectid, to_vec3f(position), to_vec3f(normal), to_vec3f(color) )\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_AppendVertex(key1, result_val)\n return result_val.i", "def Cube(self, lower_left_rear_point=(0.,0.,0.), side_length=1, nx=5, ny=5, nz=5, n=None, element_type=\"hex\"):\n\n if n != None:\n nx,ny,nz = n,n,n\n\n upper_right_front_point = (side_length+lower_left_rear_point[0],\n side_length+lower_left_rear_point[1],side_length+lower_left_rear_point[2])\n self.Parallelepiped(lower_left_rear_point=lower_left_rear_point,\n upper_right_front_point=upper_right_front_point,nx=nx,ny=ny,nz=nz,element_type=element_type)", "def place_object(self, thing):\n color = [i * 255 for i in thing.color.rgb]\n size = (20, 20)\n if thing.name == \"luna\":\n size = (5, 5)\n if self.is_visible(thing.position, max(size)):\n position = self.get_position(thing.position, size)\n pygame.draw.ellipse(self.screen, color, (position, size))", "def __init__(self, origin = Vector(0,0,0), color=Colors.White) -> None:\n self.origin = origin\n self.color = color", "def __init__(self, r, g, b, struct=None):\n self._intern = struct or dlib.Color(r,g,b)", "def __init__(self, field, color):\n Figure.__init__(self, field, color)\n self.pot_capture_moves = {(1 * self.color_value, 1),\n (1 * self.color_value, -1)}\n self.pot_moves = {(1 * self.color_value, 0),\n (2 * self.color_value, 0)}\n self.value = 1 * self.color_value\n self.short_name = self.color[0] + \"p\"", "def __init__(self, field, color):\n Figure.__init__(self, field, color)\n self.pot_moves = {(i, 0) for i in range(-7, 0)} \\\n | {(i, 0) for i in range(1, 8)} \\\n | {(0, i) for i in range(-7, 0)} \\\n | {(0, i) for i in range(1, 8)}\n self.value = 5 * self.color_value\n self.short_name = self.color[0] + \"R\"", "def __init__(self, width, height, color, main_surface):\n self.main_surface = main_surface\n self.width = width\n self.height = height\n self.color = color", "def make_cube(r, g, b):\n if r is None and g is None and b is None:\n logger.error(\"'make_cube': 'r', 'g' and 'b' input arrays are all None\")\n R, G, B, RGB = None\n\n else:\n for arr in [r, g, b]:\n if arr is not None:\n ny, nx = arr.shape\n break\n\n R = np.zeros([ny, nx, 3])\n R[:, :, 0] = r\n G = np.zeros_like(R)\n G[:, :, 1] = g\n B = np.zeros_like(R)\n B[:, :, 2] = b\n\n RGB = R + G + B\n\n return R, G, B, RGB", "def _hash_color(obj):\n name_hash = hash(obj.name[:2])\n color = (\n (name_hash >> 16) % 256,\n (name_hash >> 8) % 256,\n name_hash % 256\n )\n mat_name = \"#%02X%02X%02X\" % color\n mat = (\n 
bpy.data.materials[mat_name] if mat_name in bpy.data.materials\n else bpy.data.materials.new(mat_name)\n )\n mat.diffuse_color = tuple([i / 256 for i in color])\n obj.data.materials.append(mat)", "def generate_cube():\n \n num_voxels = 31\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if 5 < x < 10 and 5 < y < 10:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def draw_multicolor_square(t,sz):\r\n for i in [\"red\", \"purple\", \"hotpink\", \"blue\"]:\r\n t.color(i)\r\n t.forward(sz)\r\n t.left(90)", "def __init__(self, colour):\n self.colour = colour\n self.board = Board()\n self.pieces = self.assign_pieces()\n self.strategy = Strategy()", "def push_color(color): # (9)\n color_buffer.appendleft(color)", "def __init__(self, field, color):\n Figure.__init__(self, field, color)\n self.pot_moves = {(i, 0) for i in range(-7, 0)} \\\n | {(i, 0) for i in range(1, 8)} \\\n | {(0, i) for i in range(-7, 0)} \\\n | {(0, i) for i in range(1, 8)} \\\n | {(i, -i) for i in range(-7, 0)} \\\n | {(i, -i) for i in range(1, 8)} \\\n | {(i, i) for i in range(-7, 0)} \\\n | {(i, i) for i in range(1, 8)}\n self.value = 9 * self.color_value\n self.short_name = self.color[0] + \"Q\"", "def __init__(s,i,j):\n # Posição do centro\n s.cx, s.cy = convert(i,j)\n # Cor (pode ser passada para o construtor no futuro)\n s.cor = (200,200,200)\n\n # Vértices do hexágono\n s.pontos = (\n (s.cx, s.cy-L),\n (s.cx+l, s.cy-L/2),\n (s.cx+l, s.cy+L/2),\n (s.cx, s.cy+L),\n (s.cx-l, s.cy+L/2),\n (s.cx-l, s.cy-L/2),\n )", "def draw_mpo(self):\n\n x = int(self.row[self.row_index])\n y = int(self.row[self.row_index + 1])\n pt = x,y\n color = 0\n if self.mpo_color == 0:\n color = self.RED\n else:\n color = self.WHITE # white during desired sub-trajectory\n pygame.draw.circle(self.screen,color,pt,10)", "def color_vertex(obj, color):\r\n\t\r\n\tmesh = obj.data\r\n\tscn = bpy.context.scene\r\n\t\r\n\t#we need to make sure it's the active object\r\n\tscn.objects.active = obj\r\n\tobj.select = True\r\n\tvcol_layer = mesh.vertex_colors.active\r\n\tfor poly in mesh.polygons:\r\n\t\tfor loop_index in poly.loop_indices:\r\n\t\t\tloop_vert_index = mesh.loops[loop_index].vertex_index\r\n\t\t\tvcol_layer.data[loop_index].color = color\r\n\t\r\n\treturn 0", "def __init__(self, c):\n self.coords = c[:4]\n self.center = c[4]\n\n # init width and height of block\n widthAndHeight(self)\n\n self.x = randrange((pyxel.width - self.width)/4)\n self.y = 0\n self.vy = 32\n self.falling = True\n\n # init random color\n self.color = randrange(2, 15)\n\n # Add block to posMap\n mapAdd(self, theFallen)\n\n self.frame = pyxel.frame_count", "def __init__(self, field, color):\n Figure.__init__(self, field, color)\n self.pot_moves = {(2, 1), (1, 2), (-1, 2), (-2, 1),\n (-2, -1), (-1, -2), (1, -2), (2, -1)}\n self.value = 2.5 * self.color_value\n self.short_name = self.color[0] + \"N\"", "def to_pycuber(self) -> pycuber.Cube:\n self.soft_align_faces()\n qpos_copy = self.sim.data.qpos.copy()\n\n cubies = []\n\n for i in range(27):\n cubelet_meta = self.cubelet_meta_info[i]\n\n if cubelet_meta[\"type\"] == \"cubelet\":\n mtx = self._cubelet_rotation_matrix(cubelet_meta, qpos_copy)\n\n original_coords = cubelet_meta[\"coords\"]\n # current_coords = (mtx @ 
cubelet_meta['coords'].astype(float)).round().astype(int)\n\n cubie_desc = {}\n\n for prev_axis, sign in enumerate(original_coords):\n if sign != 0:\n vec = mtx[:, prev_axis] * sign\n new_axis = np.abs(vec).argmax()\n new_sign = vec[new_axis]\n\n color = PYCUBER_REVERSE_COLORS[prev_axis, sign]\n loc = PYCUBER_REVERSE_LOCATIONS[new_axis, new_sign]\n\n cubie_desc[loc] = pycuber.Square(color)\n\n if len(cubie_desc) == 3:\n cubies.append(pycuber.Corner(**cubie_desc))\n elif len(cubie_desc) == 2:\n cubies.append(pycuber.Edge(**cubie_desc))\n if cubelet_meta[\"type\"] == \"driver\":\n original_coords = cubelet_meta[\"coords\"]\n axis = np.abs(original_coords).argmax()\n sign = original_coords[axis]\n\n color = PYCUBER_REVERSE_COLORS[axis, sign]\n loc = PYCUBER_REVERSE_LOCATIONS[axis, sign]\n\n cubie_desc = {loc: pycuber.Square(color)}\n cubies.append(pycuber.Centre(**cubie_desc))\n\n return pycuber.Cube(cubies=cubies)", "def test_color__color_object_arg(self):\n color_args = (10, 20, 30, 40)\n color_obj = pygame.Color(*color_args)\n\n new_color_obj = pygame.Color(color_obj)\n\n self.assertIsInstance(new_color_obj, pygame.Color)\n self.assertEqual(new_color_obj, color_obj)\n self.assertEqual(new_color_obj.r, color_args[0])\n self.assertEqual(new_color_obj.g, color_args[1])\n self.assertEqual(new_color_obj.b, color_args[2])\n self.assertEqual(new_color_obj.a, color_args[3])", "def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))", "def __init__(self):\n\n super(ColorMap, self).__init__()\n self.by_id = dict()\n\n for color in [Color.white(), Color.black()]:\n self.push_color(color)\n\n # only black and white are added ny now\n self.black_and_white = True" ]
[ "0.6723132", "0.6348297", "0.60369486", "0.5786166", "0.57097834", "0.5662898", "0.56111765", "0.5570667", "0.5565409", "0.5560575", "0.55361265", "0.5518773", "0.5501009", "0.5469816", "0.54623425", "0.54447734", "0.54447734", "0.5434046", "0.54167914", "0.53902054", "0.53896713", "0.5383581", "0.53749996", "0.53689116", "0.5365708", "0.53643614", "0.5325049", "0.5314483", "0.5308681", "0.5291437", "0.5285784", "0.52728647", "0.5248038", "0.5239186", "0.5224237", "0.5196999", "0.51855624", "0.5183042", "0.5171356", "0.5149097", "0.51438636", "0.51420873", "0.5125317", "0.51168454", "0.51119167", "0.5071683", "0.5064401", "0.5064401", "0.50603247", "0.5058029", "0.5048537", "0.50446916", "0.50368005", "0.5036502", "0.5036487", "0.5025154", "0.5016106", "0.50153196", "0.5008129", "0.49990493", "0.4993229", "0.49926078", "0.49874458", "0.49755904", "0.49694508", "0.49629524", "0.49574995", "0.4955997", "0.49479508", "0.49436608", "0.49349967", "0.49289325", "0.4928473", "0.49254233", "0.4921238", "0.49147072", "0.4914676", "0.48986882", "0.48968646", "0.4892024", "0.48886764", "0.48847213", "0.48814005", "0.4881316", "0.48769173", "0.48768994", "0.48744538", "0.48690626", "0.4851603", "0.48515308", "0.48468938", "0.48416924", "0.48405424", "0.4835879", "0.4828816", "0.482681", "0.48267063", "0.48168233", "0.48128703", "0.48114854" ]
0.77502567
0
Checks that the GsmModem in PDU mode accepts outgoing SMS when the text is within ASCII chars 22-126.
def testSendSmsPduMode(self): # setup expectation to raise a timeout error with prompt err = errors.GsmReadTimeoutError(">") when(self.mockDevice).read_lines().thenRaise(err).thenReturn(self.oklines) self.gsm.send_sms("1234", "Test Message") # must see command with size verify(self.mockDevice, times=1).write("AT+CMGS=21\r") # must see command with text and terminating char verify(self.mockDevice, times=1).write("00110004A821430000AA0CD4F29C0E6A96E7F3F0B90C\x1a") # allow any number of reads verify(self.mockDevice, atleast=1).read_lines() verifyNoMoreInteractions(self.mockDevice)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_sms_valid(text=''):\n try:\n text.decode('ascii')\n except:\n return False\n if len(text) > 160:\n return False\n\n return True", "def testSendSmsPduModeError(self):\n\n # setup expectation to raise a non-timeout error with prompt\n when(self.mockDevice).read_lines().thenRaise(Exception(\"something other than timeout\"))\n self.gsm.send_sms(\"1234\", \"Test Message\")\n \n # must see command with size\n verify(self.mockDevice, times=1).write(\"AT+CMGS=21\\r\")\n # must see command to break out of command prompt\n verify(self.mockDevice, times=1).write(\"\\x1b\")\n # must NOT see command with text and terminating char\n verify(self.mockDevice, times=0).write(\"00110004A821430000AA0CD4F29C0E6A96E7F3F0B90C\\x1a\")\n # allow any number of reads\n verify(self.mockDevice, atleast=1).read_lines()\n verifyNoMoreInteractions(self.mockDevice)", "def send_sms_via_modem(self, mobile, text=\"\"):\n\n mobile = self.sanitise_phone(mobile)\n\n # Add '+' before country code\n mobile = \"+\" + mobile\n\n try:\n self.modem.send_sms(mobile, text)\n return True\n except:\n return False", "def astral(msg):\r\n return any(ord(c) > 0xFFFF for c in msg)", "def _validate_ascii(message):\n return all(ord(c) < 128 for c in message)", "def isSpamSMS(textLine):\n\treturn re.sub(\"[\\^w]\", \" \", textLine).split()[0].lower() == \"spam\"", "def isvalidport(txt):\n return txt.isdigit() and int(txt) <= 65535 and int(txt) >= 0", "def message_check(self, message):\n if(message == \"\"):\n return False\n\n if(len(message) > 256):\n return False\n\n return True", "def verify_text(self, text):\n pass", "def _text(self, fromwhom, number, text):\n\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(self._smsaddress, self._smspw)\n server.sendmail(str(fromwhom), '{}@vtext.com'.format(number),\n str(text))\n server.quit()", "def send_sms(self, sms):\n pass", "def check_ascii_compliance(plaintext: bytes) -> bool:\n return all(c < 128 for c in plaintext)", "def test_get_sms_message(self):\n pass", "def is_addressed_to_us(cls, msg):\n return msg.fields.get('to_addr') in cls.acceptable_to_numbers", "def is_valid_msg(msg):\n for char in msg:\n if char not in string.ascii_letters and char not in string.punctuation and char != ' ':\n return False\n return True", "def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True", "def validate_message(self, message):\n\n for char in message:\n if ord(char) < 65 or ord(char) > 90:\n raise ValueError('Invalid message. 
Enigma Machine only supports messages composed of uppercase letters')", "def sms():\n def send_sms(number, message):\n #get session bus\n try:\n session_bus = dbus.SessionBus()\n except dbus.exceptions.DBusException:\n click.echo(chalk.red('Have a display you must'))\n return\n\n #check for kdeconnect\n try:\n devices_dbus_obj = session_bus.get_object('org.kde.kdeconnect','/modules/kdeconnect/devices')\n except dbus.exceptions.DBusException:\n click.echo(chalk.red('kdeconnect not installed it appears'))\n return\n\n #get devices ids\n devices_xml = devices_dbus_obj.Introspect(dbus_interface='org.freedesktop.DBus.Introspectable')\n devices_xml = ET.fromstring(devices_xml)\n nodes = devices_xml.findall('node')\n if(len(nodes) is 0):\n click.echo(chalk.red('Devices there are not'))\n return\n deviceIDs = list()\n for node in nodes:\n deviceIDs.append(node.get('name'))\n\n #get devices properties\n deviceID_Props = dict()\n for ID in deviceIDs:\n try:\n device = session_bus.get_object('org.kde.kdeconnect', '/modules/kdeconnect/devices/' + ID)\n deviceProps = device.GetAll('', dbus_interface='org.freedesktop.DBus.Properties')\n deviceID_Props[ID] = deviceProps\n except dbus.exceptions.DBusException:\n #don't create an entry in the dictionary if the object, or a GetAll method does not exist\n pass\n if(len(deviceID_Props) is 0):\n click.echo(chalk.red('Devices there are not'))\n return\n\n #eliminate non sms devices\n devices_no_sms = list()\n for device in deviceID_Props:\n keeping = False\n for plugin in deviceID_Props[device]['supportedPlugins']:\n if('sms' in plugin):\n keeping = True\n if(not keeping):\n devices_no_sms.append(device)\n for device in devices_no_sms:\n del deviceID_Props[device]\n\n #if there are no devices that support sms\n if(len(deviceID_Props) is 0):\n click.echo(chalk.red('Devices that support sms there are not'))\n return\n #elif only one device was found that supports sms\n elif(len(deviceID_Props) is 1):\n click.echo(chalk.yellow('Device using: ' + str(list(deviceID_Props.values())[0]['name'])))\n sendMessage = session_bus.get_object('org.kde.kdeconnect', '/modules/kdeconnect/devices/' + str(list(deviceID_Props.keys())[0]) + '/sms')\n sendMessage.sendSms(number, message, dbus_interface='org.kde.kdeconnect.device.sms')\n return\n #otherwise get user to choose device\n else:\n choice_map = dict()\n for idx, device in enumerate(deviceID_Props, start=1):\n click.echo(chalk.green(str(idx) + ': ' + deviceID_Props[device]['name']))\n choice_map[str(idx)] = device\n choice = click.prompt(chalk.blue('Device, you must select: '), default='1', type=click.Choice(choice_map.keys()))\n #click.echo('you chose: ' + choice_map[the_chosen_device] + ' with id: ' + deviceNames_IDs[choice_map[the_chosen_device]])\n sendMessage = session_bus.get_object('org.kde.kdeconnect', '/modules/kdeconnect/devices/' + choice_map[choice] + '/sms')\n sendMessage.sendSms(number, message, dbus_interface='org.kde.kdeconnect.device.sms')\n return\n\n click.echo(chalk.blue('For whom you want to send an sms'))\n friend_name = input().strip()\n friend_name_lower = friend_name.lower()\n if os.path.isfile(PEOPLE_CONFIG_FILE_PATH):\n with open(PEOPLE_CONFIG_FILE_PATH) as fin:\n contents = yaml.load(fin)\n entries = contents['entries']\n for entry in entries:\n if(friend_name == entry['name'] or friend_name_lower == entry['name']):\n number = entry['mobile']\n break\n if('number' not in locals()):\n click.echo(chalk.red('Friend not found.'))\n else:\n if(len(number) is not 0):\n click.echo(chalk.blue('Message, 
you must enter: '))\n message = input(':')\n click.echo(chalk.yellow('Device to send sms to ' + number + ' looking for: '))\n send_sms(number, message)\n else:\n click.echo(chalk.red('Friends number not in people file, run `yoda people setup` to add it.'))\n else:\n click.echo(chalk.red('The People file does not exist, run `yoda people setup` to create an entry.'))", "def isMAC(s):\n\n s = s.replace(':', '')\n if len(s) != 12: return 0\n for char in s:\n if re.compile('[a-zA-Z0-9]+').match(char) == None: return 0\n return 1", "def send_text(msg, up):\n try:\n client = TwilioRestClient(account=TWILIO_ACCOUNT_SID,\n token=TWILIO_AUTH_TOKEN)\n c = client.sms.messages.create(to=up.phone,\n from_=WATTTIME_PHONE,\n body=msg.msg)\n TwilioSMSEvent(user=up.user,\n msg_type=msg.msg_type,\n to_number=up.phone,\n from_number=WATTTIME_PHONE,\n body=msg.msg).save()\n\n debug(\"texted '{}' to {}\".format(msg, str(up.name)))\n return True\n except:\n print (\"Faild message\", up.phone, WATTTIME_PHONE, msg.msg)\n debug(\"failed to text '{}' to {}\".format(msg, str(up.name)))\n return False", "def isValid(text):\n return bool(re.search(r'\\R2D2\\b', text, re.IGNORECASE))", "def parseable(message_data):\n if not message_data:\n raise TypeError('message_data must not be None')\n\n acceptable = range(97, 123) + range(65, 91) + range(48, 58) + range(33, 43) + range(44, 48) + [58, 63, 64, 94]\n return any(ord(c) not in acceptable for c in message_data['message'].replace(' ', ''))", "def check_pass(text):\r\n\r\n upperRegex = re.compile(r'[A-Z]')\r\n lowerRegex = re.compile(r'[a-z]')\r\n lengthRegex = re.compile(r'.{8,}')\r\n digitRegex = re.compile(r'\\d')\r\n\r\n if not upperRegex.search(text):\r\n return False\r\n elif not lowerRegex.search(text):\r\n return False\r\n elif not lengthRegex.search(text):\r\n return False\r\n elif not digitRegex.search(text):\r\n return False\r\n else:\r\n return True", "def test_smsmessage_user():", "def isScintillator(string, pos):\n return ((pos == 65 or pos == 66) and\n (string == 12 or string == 62))", "def sms_code(string: str) -> Union[str, None]:\n m = p.search(string.strip())\n if m: m = m.group().replace('-', '').replace('sms:', '')\n return m", "def send_sms(self, message, to=CONTACT_NUMBER):\n try:\n pbx_alarm = PBXAlert()\n pbx_alarm.send_sms(self.tinfo['message'])\n if self.verbose:\n print(\"{} Successfully sent SMS!\".format(Timer.OK))\n return True\n except Exception as e:\n print(\"{} Caught exception in send_sms: {}\".format(Timer.FAIL, e))\n return False", "def _msim_message_test(\n self,\n ad_mo,\n ad_mt,\n mo_sub_id,\n mt_sub_id, msg=\"SMS\",\n max_wait_time=MAX_WAIT_TIME_SMS_RECEIVE,\n expected_result=True):\n\n if msg == \"SMS\":\n for length in self.message_lengths:\n message_array = [rand_ascii_str(length)]\n if not sms_send_receive_verify_for_subscription(\n self.log,\n ad_mo,\n ad_mt,\n mo_sub_id,\n mt_sub_id,\n message_array,\n max_wait_time):\n ad_mo.log.warning(\n \"%s of length %s test failed\", msg, length)\n return False\n else:\n ad_mo.log.info(\n \"%s of length %s test succeeded\", msg, length)\n self.log.info(\"%s test of length %s characters succeeded.\",\n msg, self.message_lengths)\n\n elif msg == \"MMS\":\n for length in self.message_lengths:\n message_array = [(\"Test Message\", rand_ascii_str(length), None)]\n\n if not mms_send_receive_verify(\n self.log,\n ad_mo,\n ad_mt,\n message_array,\n max_wait_time,\n expected_result):\n self.log.warning(\"%s of body length %s test failed\",\n msg, length)\n return False\n else:\n self.log.info(\n 
\"%s of body length %s test succeeded\", msg, length)\n self.log.info(\"%s test of body lengths %s succeeded\",\n msg, self.message_lengths)\n return True", "def isValid(text):\n return bool(re.search(r\"\\b((close|activate)\\ (check|tunnel|ubuntu|fedora|windows))\\b\", text, re.IGNORECASE))", "def validate_seq(sequence):\n sequence = sequence.strip()\n sequence = sequence.replace(\" \", \"\")\n sequence.upper()\n regex = re.compile('^[ACTGNRYSWKMBDHVEFILPQSXZ]*$', re.I)\n if regex.search(sequence) is not None:\n return True\n else:\n return False", "def __isHexString(self, text):\n return all(map(lambda c: c in \"0123456789abcdefABCDEF\", text))", "def test_sending_sms(self):\n try:\n from django.conf import settings\n except ImportError:\n self.fail(msg=\"No TEST_NUMBER found in settings!\")\n\n from rapidsms.router import send\n from rapidsms.models import Connection, Backend\n from random import randint\n\n b = Backend.objects.get_or_create(name='envaya_nexmo')[0]\n c = Connection.objects.get_or_create(identity = settings.TEST_NUMBER, backend = b)[0]\n msg = \"Hey, this is a test message from NexmoOutgoingBackendTest! \\n Your Lucky number is %s\" % (randint(1,42))\n\n send(msg,[c])\n print \"Cannot actually verify whether the message was sent or not because of the limitations of rapdisms framework :-/\"", "def isValid(text):\n return bool(re.search(r'\\b((kill|stop) the (alarm|clock|music))\\b', text, re.IGNORECASE))", "def text_cell_phone(self, sender, message):\n if self.cell_phone:\n text_message.send_sms(sender, message, self.cell_phone)", "def check_message(check):\n words_of_message = speech_text.split()\n if set(check).issubset(set(words_of_message)):\n return True\n else:\n return False", "def check_ont_address_format(address):\n if len(address) != 34:\n return False\n\n for ch in address:\n if ch not in '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz':\n return False\n\n return True", "def check(self, text):\n lt = s = n = 0\n result = False\n for g in text:\n if g in LETTERS and lt < self.letters:\n lt += 1\n if g in NUMBERS and n < self.numbers:\n n += 1\n if g in SYMBOLS and s < self.symbols:\n s += 1\n if n == self.numbers and s == self.symbols and lt == self.letters:\n result = True\n break\n return result", "def isSpam(textLine):\n\treturn True", "def match(self, buf):\n X = 0x58\n S = 0x53\n Y = 0x79\n M = 0x6d\n NEWLINE = 0xa\n ZERO = 0x30\n NINE = 0x39\n return len(buf) >= 10 and \\\n buf[0] == X and \\\n buf[1] == S and \\\n buf[2] == Y and \\\n buf[3] == M and \\\n buf[4] == NEWLINE and \\\n buf[5] >= ZERO and buf[5] <= NINE and \\\n buf[6] >= ZERO and buf[6] <= NINE and \\\n buf[7] >= ZERO and buf[7] <= NINE and \\\n buf[8] >= ZERO and buf[8] <= NINE and \\\n buf[9] == NEWLINE", "def check(self, text):\n p = self.d\n i = 0\n j = 0\n result = []\n ln = len(text)\n while i + j < ln:\n t = text[i + j].lower()\n # print i,j,hex(ord(t))\n if not (t in p):\n j = 0\n i += 1\n p = self.d\n continue\n p = p[t]\n j += 1\n # print p,i,j\n if chr(11) in p:\n p = self.d\n result.append(text[i:i + j])\n i = i + j\n j = 0\n return result", "def is_accepted_phone_number(self, dst):\n\n if self.cfg['VoipIO']['allowed_phone_numbers']:\n p = re.search(self.cfg['VoipIO']['allowed_phone_numbers'], dst)\n if not p:\n return False\n\n if self.cfg['VoipIO']['forbidden_phone_number']:\n p = re.search(self.cfg['VoipIO']['forbidden_phone_number'], dst)\n if p:\n return False\n\n return True", "def find_message(text: str) -> str:\n if not all(ch in string.printable for ch in 
text):\n return\n elif not text:\n return \"\"\n if len(text) > 1000:\n return\n decode_message = \"\"\n # check case sensitive for a simple character in string\n for correct_char in [char for char in text\\\n if char.upper() == char and char.lower()\\\n in 'abcdefghijklmnopqrstuwxyz']:\n decode_message += correct_char\n return decode_message", "def isValid(text):\r\n return bool(re.search(r'\\bcommute\\b', text, re.IGNORECASE))", "def test_alnum(self, address):\n t=address.replace(\" \", \"\").isalnum()\n assert t, \"it only accept digits and letters\"", "def test_recipient_not_str_error(\n config,\n):\n sms = YesssSMS.YesssSMS(\"0000000000\", \"2d4faa0ea6f55813\")\n with pytest.raises(ValueError):\n sms.send(176264916361239, \"test\")", "def ch_t_checker(self, seq):\n seq = re.sub(r'чт', r'шт', seq)\n return seq", "def process_sms():\n phone_number = request.values.get('From', None)\n sms_message = request.values.get('Body', None)\n resp = twilio.twiml.Response()\n regina_answer = ask_regina(phone_number, sms_message, \"sms\")['text']\n resp.message(regina_answer)\n return str(resp)", "def mp_mb_checker(self, seq):\n# print('input ' + seq)\n seq = re.sub(r'([ёуеыаоэяию])м(п|б)',r'\\1н\\2',seq)\n# print('output ' + seq)\n return seq", "def can_recept(self, text, *args, **kwargs):\n # such slot always can recept (when message is not empty) because it consumes the message\n if text:\n return True\n else:\n return False", "def _handle_ok_ack(string):\n if string.strip() == Parser.OK_MSG:\n return True\n return False", "def test_unsupported_chars_error(\n config,\n):\n with requests_mock.Mocker() as m:\n sms = YesssSMS.YesssSMS(\"0000000000\", \"2d4faa0ea6f55813\")\n m.register_uri(\n \"POST\",\n # pylint: disable=protected-access\n sms._login_url,\n status_code=302,\n # pylint: disable=protected-access\n headers={\"location\": sms._kontomanager},\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n sms._kontomanager,\n status_code=200,\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n sms._sms_form_url,\n status_code=200,\n text=TEST_FORM_TOKEN_SAMPLE,\n )\n m.register_uri(\n \"POST\",\n # pylint: disable=protected-access\n sms._send_sms_url,\n status_code=200,\n text=_UNSUPPORTED_CHARS_STRING,\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n sms._logout_url,\n status_code=200,\n )\n with pytest.raises(sms.UnsupportedCharsError):\n sms.send(YESSS_TO, \"test\")", "def check_as_htk_phone(phone):\n try:\n phone = str(phone)\n except UnicodeEncodeError:\n return False\n\n # Must not contain spaces\n phone_copy = phone.strip()\n if len(phone_copy) != len(phone):\n return False\n\n # Must contain characters!\n if len(phone) == 0:\n return False\n\n # Must not start by minus or plus\n if phone[0] in ['-', '+']:\n return False\n\n # Must not start by a digit\n try:\n int(phone[0])\n except ValueError:\n return False\n\n return True", "def verify_omm_license_agrmt():\r\n msg = \"\"\r\n try:\r\n if g.platform == 'android':\r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.license_agrmt_input)\r\n if not text_to_verify:\r\n print \"Unable to retrive text to verify license agreement input file\"\r\n return False, msg\r\n\r\n 'Getting text from License agreement page'\r\n text = ui_controls.text_view(get_obj_identifier('license_agreeText_textView'))\r\n if not text:\r\n print \"Text retrieved from text view is empty\"\r\n return False, msg\r\n 'Comparing text retrieved from UI with verification input text'\r\n if 
text_to_verify.strip() == text.strip():\r\n print (\"License agreement displayed in UI is matching with text to verify for license agreement\") + \\\r\n (\"Verification Text- %s\" % text_to_verify) + \\\r\n (\"############Text from UI is #########\\n %s\\n\" % text)\r\n else:\r\n return False, msg\r\n else:\r\n print \"IOS value and text does nto return entire license text. Hence cannot validate license text in IOS\"\r\n return True, msg\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return False, msg\r\n return True, msg", "def is_phone_number(self, dst):\n p = re.search('(^(\\+|00)?[0-9]{1,12}$)', dst)\n if not p:\n return False\n\n return True", "def is_bcc_correct(self, buff, end_index, start_index=1):\n\t\treturn calculate_bcc_iso1155(buff[start_index:end_index+1]) == ord(buff[end_index+1])", "def sms_disabled(self):\n return self._sms_disabled", "def isValid(self) :\n try :\n pos = 0\n while self.firstblock[pos] == chr(0) :\n pos += 1\n except IndexError : \n return False\n else : \n firstblock = self.firstblock[pos:]\n if firstblock.startswith(\"\\033E\\033\") or \\\n firstblock.startswith(\"\\033%1BBPIN;\") or \\\n ((pos == 11000) and firstblock.startswith(\"\\033\")) or \\\n (firstblock.startswith(\"\\033*rbC\") and (not self.lastblock[-3:] == \"\\f\\033@\")) or \\\n firstblock.startswith(\"\\033*rB\\033\") or \\\n firstblock.startswith(\"\\033%8\\033\") or \\\n (firstblock.find(\"\\033%-12345X\") != -1) or \\\n (firstblock.find(\"@PJL ENTER LANGUAGE=PCL\\012\\015\\033\") != -1) or \\\n (firstblock.startswith(chr(0xcd)+chr(0xca)) and (firstblock.find(\"\\033E\\033\") != -1)) :\n return True\n else : \n return False", "def process_sms_registration(msg):\n registration_processed = False\n text_words = msg.text.upper().split()\n keyword1 = text_words[0] if len(text_words) > 0 else \"\"\n keyword2 = text_words[1].lower() if len(text_words) > 1 else \"\"\n keyword3 = text_words[2] if len(text_words) > 2 else \"\"\n if keyword1 in REGISTRATION_KEYWORDS and keyword2 != \"\":\n domain = Domain.get_by_name(keyword2, strict=True)\n if domain is not None:\n if keyword3 in REGISTRATION_MOBILE_WORKER_KEYWORDS and domain.sms_mobile_worker_registration_enabled:\n #TODO: Register a PendingMobileWorker object that must be approved by a domain admin\n pass\n elif domain.sms_case_registration_enabled:\n register_sms_contact(\n domain=domain.name,\n case_type=domain.sms_case_registration_type,\n case_name=\"unknown\",\n user_id=domain.sms_case_registration_user_id,\n contact_phone_number=strip_plus(msg.phone_number),\n contact_phone_number_is_verified=\"1\",\n owner_id=domain.sms_case_registration_owner_id,\n )\n msg.domain = domain.name\n msg.save()\n registration_processed = True\n \n return registration_processed", "def handle_privmsg(self, ievent):\n\n if ievent.txt and ievent.txt[0] == '\\001':\n self.handle_ctcp(ievent)\n return 1", "def _mac_test(mac):\n\n\t\tif re.search(r'([0-9A-F]{2}[:]){5}([0-9A-F]){2}', mac.upper()) is not None:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def validacion(texto):\n bandera = False\n for caracter in texto:\n if caracter != '0' and caracter != '1':\n bandera = True\n if bandera == True:\n print(\"El texto ingresado no es binario\")\n return bandera", "def send_confirmation_sms(cls, user_obj: User) -> bool:\n confirmation_code = cls.set_confirmation_code(user_obj)\n id_string = '%s%d' % (user_obj.id, datetime.now().timestamp())\n\n data = {\n 'login': settings.NIKITA_LOGIN,\n 'pwd': settings.NIKITA_PASSWORD,\n 
'id': id_string,\n 'sender': settings.NIKITA_SENDER,\n 'text': f'Ваш код активации: {confirmation_code}',\n 'phones': [str(user_obj.phone).replace('+', '')],\n 'test': settings.NIKITA_TEST\n }\n page = dicttoxml(data, custom_root='message',\n item_func=lambda x: x[:-1], attr_type=False)\n response = requests.post(\n 'https://smspro.nikita.kg/api/message',\n data=page, headers={'Content-Type': 'application/xml'}\n )\n response_dict = xmltodict.parse(response.text)\n status = response_dict['response']['status']\n return True if status in ('0', '11') else False", "def is_outgoing(self, pkt):\n try:\n return pkt[Ether].src.lower() == get_if_hwaddr(conf.iface).lower()\n except IndexError:\n return False", "def testIsText(self):\n parser = text_parser.PyparsingSingleLineTextParser()\n\n bytes_in = b'this is My Weird ASCII and non whatever string.'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = 'Plaso Síar Og Raðar Þessu'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'\\x01\\\\62LSO\\xFF'\n self.assertFalse(parser._IsText(bytes_in))\n\n bytes_in = b'T\\x00h\\x00i\\x00s\\x00\\x20\\x00'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'Ascii\\x00'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'Ascii Open then...\\x00\\x99\\x23'\n self.assertFalse(parser._IsText(bytes_in))", "def mandar_mensage(self, msg=PING) -> bool:\n ack: TransmitStatusPacket = None\n # Transformamos el mensaje recibido en un string tratable\n msg = str(msg)\n # Recuperamos la dirección del dispositivo remoto en formato de 64 bits\n high = self.remote_Zigbee.get_64bit_addr()\n # Recuperamos la dirección del dispositivo remoto en 16 bits o la marcamos como desconocida\n low = self.remote_Zigbee.get_16bit_addr() or XBee16BitAddress.UNKNOWN_ADDRESS\n try:\n # Intentamos mandar el mensaje\n ## Versión sin fragmentar el paquete\n ack = super().send_data_64_16(high, low, msg)\n self.logger.debug(format(ack))\n if ack.transmit_status is not TransmitStatus.SUCCESS:\n self.logger.warning(format(ack))\n\n except Exception as e:\n self.logger.error(\"Se ha encontrado un error al mandar el mensaje\\n\\t\" + str(e))\n # Añadir código para el reintento\n else:\n # TODO Borrar esta traza de control\n self.logger.info(\"Mandado mensaje:\\t\" + msg)\n return ack.transmit_status is TransmitStatus.SUCCESS", "def func_ehlo(self, data):\n data_list = bytes(data).decode().encode('ascii', 'ignore').decode().split(' ')\n if data_list[0].lower().rstrip() == 'ehlo':\n message = '250-' + self.hostname + '\\r\\n250-PIPELINING\\r\\n' \\\n + '250-8BITMIME\\r\\n250-SIZE ' \\\n + str(self.data_recv_size) \\\n + '\\r\\n' + '250 AUTH LOGIN PLAIN'\n self.func_sender(message)\n return True", "def _test_msim_message(\n self,\n mo_slot,\n mt_slot,\n dds_slot,\n msg=\"SMS\",\n mo_rat=[\"\", \"\"],\n mt_rat=[\"\", \"\"],\n direction=\"mo\",\n expected_result=True):\n ads = self.android_devices\n\n if direction == \"mo\":\n ad_mo = ads[0]\n ad_mt = ads[1]\n else:\n ad_mo = ads[1]\n ad_mt = ads[0]\n\n if mo_slot is not None:\n mo_sub_id = get_subid_from_slot_index(self.log, ad_mo, mo_slot)\n if mo_sub_id == INVALID_SUB_ID:\n ad_mo.log.warning(\"Failed to get sub ID at slot %s.\", mo_slot)\n return False\n mo_other_sub_id = get_subid_from_slot_index(\n self.log, ad_mo, 1-mo_slot)\n set_message_subid(ad_mo, mo_sub_id)\n else:\n _, mo_sub_id, _ = get_subid_on_same_network_of_host_ad(\n ads, type=\"sms\")\n if mo_sub_id == INVALID_SUB_ID:\n ad_mo.log.warning(\"Failed to get sub ID at slot %s.\", mo_slot)\n return False\n 
mo_slot = \"auto\"\n set_message_subid(ad_mo, mo_sub_id)\n if msg == \"MMS\":\n set_subid_for_data(ad_mo, mo_sub_id)\n ad_mo.droid.telephonyToggleDataConnection(True)\n ad_mo.log.info(\"Sub ID for outgoing %s at slot %s: %s\", msg, mo_slot,\n get_outgoing_message_sub_id(ad_mo))\n\n if mt_slot is not None:\n mt_sub_id = get_subid_from_slot_index(self.log, ad_mt, mt_slot)\n if mt_sub_id == INVALID_SUB_ID:\n ad_mt.log.warning(\"Failed to get sub ID at slot %s.\", mt_slot)\n return False\n mt_other_sub_id = get_subid_from_slot_index(\n self.log, ad_mt, 1-mt_slot)\n set_message_subid(ad_mt, mt_sub_id)\n else:\n _, mt_sub_id, _ = get_subid_on_same_network_of_host_ad(\n ads, type=\"sms\")\n if mt_sub_id == INVALID_SUB_ID:\n ad_mt.log.warning(\"Failed to get sub ID at slot %s.\", mt_slot)\n return False\n mt_slot = \"auto\"\n set_message_subid(ad_mt, mt_sub_id)\n if msg == \"MMS\":\n set_subid_for_data(ad_mt, mt_sub_id)\n ad_mt.droid.telephonyToggleDataConnection(True)\n ad_mt.log.info(\"Sub ID for incoming %s at slot %s: %s\", msg, mt_slot,\n get_outgoing_message_sub_id(ad_mt))\n\n self.log.info(\"Step 1: Switch DDS.\")\n if dds_slot:\n if not set_dds_on_slot_1(ads[0]):\n self.log.warning(\n \"Failed to set DDS at eSIM on %s\", ads[0].serial)\n return False\n else:\n if not set_dds_on_slot_0(ads[0]):\n self.log.warning(\n \"Failed to set DDS at pSIM on %s\", ads[0].serial)\n return False\n\n self.log.info(\"Step 2: Check HTTP connection after DDS switch.\")\n if not verify_http_connection(self.log,\n ads[0],\n url=\"https://www.google.com\",\n retry=5,\n retry_interval=15,\n expected_state=True):\n\n self.log.error(\"Failed to verify http connection.\")\n return False\n else:\n self.log.info(\"Verify http connection successfully.\")\n\n mo_phone_setup_func_argv = (self.log, ad_mo, mo_sub_id)\n mt_phone_setup_func_argv = (self.log, ad_mt, mt_sub_id)\n\n if mo_slot in (0, 1):\n # set up the rat on mo side another slot which not to be test(primary device)\n phone_setup_on_rat(self.log, ad_mo, mo_rat[1-mo_slot], mo_other_sub_id)\n # get phone setup function and required argument of primary device\n if '5g' in mo_rat[mo_slot].lower():\n mo_phone_setup_func_argv = (self.log, ad_mo, mo_sub_id, GEN_5G)\n mo_phone_setup_func = phone_setup_on_rat(\n self.log,\n ad_mo,\n mo_rat[mo_slot],\n only_return_fn=True)\n else:\n # set up the rat and get phone setup function on mo side(non-primary device)\n phone_setup_on_rat(self.log, ad_mo, 'general', sub_id_type='sms')\n mo_phone_setup_func = phone_setup_voice_general_for_subscription\n\n if mt_slot in (0, 1):\n # set up the rat on mt side another slot which not to be test(primary device)\n phone_setup_on_rat(self.log, ad_mt, mt_rat[1-mt_slot], mt_other_sub_id)\n # get phone setup function and required argument of primary device\n if '5g' in mt_rat[mt_slot].lower():\n mt_phone_setup_func_argv = (self.log, ad_mt, mt_sub_id, GEN_5G)\n mt_phone_setup_func = phone_setup_on_rat(\n self.log,\n ad_mt,\n mt_rat[mt_slot],\n only_return_fn=True)\n else:\n # set up the rat and get phone setup function on mt side(non-primary device)\n phone_setup_on_rat(self.log, ad_mt, 'general', sub_id_type='sms')\n mt_phone_setup_func = phone_setup_voice_general_for_subscription\n\n self.log.info(\"Step 3: Set up phones in desired RAT.\")\n tasks = [(mo_phone_setup_func, mo_phone_setup_func_argv),\n (mt_phone_setup_func, mt_phone_setup_func_argv)]\n if not multithread_func(self.log, tasks):\n self.log.error(\"Phone Failed to Set Up Properly.\")\n return False\n\n 
time.sleep(WAIT_TIME_ANDROID_STATE_SETTLING)\n self.log.info(\"Step 4: Send %s.\", msg)\n\n if msg == \"MMS\":\n for ad, current_data_sub_id, current_msg_sub_id in [\n [ ads[0],\n get_default_data_sub_id(ads[0]),\n get_outgoing_message_sub_id(ads[0]) ],\n [ ads[1],\n get_default_data_sub_id(ads[1]),\n get_outgoing_message_sub_id(ads[1]) ]]:\n if current_data_sub_id != current_msg_sub_id:\n ad.log.warning(\n \"Current data sub ID (%s) does not match message\"\n \" sub ID (%s). MMS should NOT be sent.\",\n current_data_sub_id,\n current_msg_sub_id)\n expected_result = False\n\n result = self._msim_message_test(ad_mo, ad_mt, mo_sub_id, mt_sub_id,\n msg=msg, expected_result=expected_result)\n\n if not result:\n log_messaging_screen_shot(ad_mo, test_name=\"%s_tx\" % msg)\n log_messaging_screen_shot(ad_mt, test_name=\"%s_rx\" % msg)\n\n return result", "def test_away(self):\n message = \"Sorry, I'm not here.\"\n self.protocol.away(message)\n expected = [\n \"AWAY :{}\".format(message),\n \"\",\n ]\n self.assertEqualBufferValue(self.transport.value().split(b\"\\r\\n\"), expected)", "def send_sms(self,msg,to=None,long=True):\n if long:\n return self.send_msg(msg,to,\"SendCatSMS\")\n else:\n return self.send_msg(msg,to,\"SendSMS\")", "def test_40_phonenumbers_too_long(self):\n number_phone = self.samples[4]\n with self.assertRaises(osv.except_osv):\n self.pn._symbol_set_char(number_phone)", "def handle_telnet_cmd(self, telnet_cmd):\n print(\"Telnet cmd: {}\".format(telnet_cmd))\n # termious hack\n if self.termious is None:\n if len(telnet_cmd) == 8:\n if telnet_cmd[0] == 250 and telnet_cmd[1] == 31:\n self.termious = True\n if len(telnet_cmd) == 2:\n if telnet_cmd[0] == 251 and telnet_cmd[1] == 31:\n self.termious = False", "def send_system_exclusive(self, value=\"\"):\n msg = parse_sysex_string(value)\n\n if (msg and msg.startswith(b'\\xF0') and msg.endswith(b'\\xF7') and\n all((val <= 0x7F for val in msg[1:-1]))):\n self._midi.send_message(msg)\n else:\n raise ValueError(\"Invalid sysex string: %s\", msg)", "def is_port(inString):\r\n if is_int(inString):\r\n intiger = int(inString)\r\n return intiger >= 0 and intiger < 65536\r\n #the 0 is acepted, beacuse later it will be modifyed\r\n else:\r\n return False", "def is_ascii(token):\n\n printable = set(string.printable)\n\n for char in token:\n if char not in printable:\n return False\n\n return True", "def check_message(self, msg):\n pass", "def barcode_is_valid(s):\n return (bool(re.match(r'^[ATGC]*$',s))\n or barcode_is_10xgenomics(s))", "def said(self, text):\n for message in self.messages:\n if text in message:\n return True\n return False", "def degsm(self):\n self.success = False", "def client_receives():\n test_str = \"t35t1nG cl13nT r3c31\\/1NG\\n\"\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(server)\n if not segments:\n return False\n\n # The first segment should be one received from the client, and should have\n # the correct length.\n segment = segments[0]\n return (\n str(segment.dest_port) == SERVER_PORT and\n segment.length == CTCP_HEADER_LEN + len(test_str)\n )", "def send_sms(to, datas, temp_id):\n cpp = CCP()\n cpp.sendTemplateSMS(to, datas, temp_id)", "def mobile_validator(mobile):\n if mobile[0:4] != '+989':\n raise ValidationError('Invalid mobile')", "def validaTexto(text,entrada): \n\tvalido=True\n\tif (entrada == \"rut\"):\n\t\tcadena = \"0123456789kK\"\n\tif (entrada == \"num\"):\n\t\tcadena = \"0123456789\"\n\tif (entrada == 
\"sinSimbolos\"):\n\t\tcadena = \" ,.-abcdefghijklmnñopqrstuvwxyzáéíóúABCDEFGHIJKLMNOPQRSTUVWXYZÁÉÍÓÚ0123456789\"\n\tif (entrada == \"texto\"):\n\t\tcadena = \" abcdefghijklmnñopqrstuvwxyzáéíóúABCDEFGHIJKLMNOPQRSTUVWXYZÁÉÍÓÚ\"\n\tif (entrada == \"rut_login\"):\n\t\tcadena = \"0123456789kK-\"\n\ti=0\n\tstring_num=str(text.encode('utf-8'))\n\tif(len(string_num)==0):\n\t\tvalido=False\n\twhile(valido and (i<len(string_num))):\n\t\tif (not string_num[i] in cadena):\n\t\t\tvalido=False\n\t\ti=i+1\n\treturn valido", "def __valid_token_format(self, token):\n if len(token) != self.TOKEN_LENGTH * 2:\n return False\n for c in token:\n if c not in '01234567890abcdef':\n return False\n return True", "def check_mail(eml):\n return eml[::-1] if eml != '#N/A' else '#N/A'", "def validate_fasta_seq(sequence):\n sequence = sequence.replace(\" \", \"\")\n sequence.upper()\n regex = re.compile('>\\S*\\n[ACTGNRYSWKMBDHVEFILPQSXZ]*', re.MULTILINE)\n if regex.search(sequence) is not None:\n return True\n else:\n return False", "def send_sms_via_api(self, mobile, text=\"\"):\n\n mobile = self.sanitise_phone(mobile)\n\n try:\n self.sms_api_post_config[self.sms_api.message_variable] = text\n self.sms_api_post_config[self.sms_api.to_variable] = str(mobile)\n query = urllib.urlencode(self.sms_api_post_config)\n request = urllib.urlopen(self.sms_api.url, query)\n output = request.read()\n return True\n except:\n return False", "def looks_like_a_smiles(self):\n regexp = r\"^([^J][0-9BCOHNSOPIFKcons@+\\-\\[\\]\\(\\)\\\\\\/%=#$,.~&!|Si|Se|Br|Mg|Na|Cl|Al]{3,})$\"\n return re.search(regexp, self.dirty) is not None", "def valid_message_length(self):\n if self.message_len() > 0:\n if self.message_len() <= self.max_msg_length:\n return True\n return False", "def message(self, text):\n lines = str(text).split('\\n') # Split at newline(s)\n for i, line in enumerate(lines): # For each substring...\n if i > 0: # If newline(s),\n self.write_lcd(self.LCD_DATA_E1, 0xC0) # set DDRAM address to 2nd line\n self.write_lcd(self.LCD_DATA_E1, line, True) # Issue substring", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def isascii(s):\n return len(s) == len(s.encode())", "def isascii(s):\n return len(s) == len(s.encode())", "def test_outgoing_message_send(self):\n message = self.create_outgoing_message()\n message.send()\n self.assertEqual(self.outbound[0].text, message.text)", "def isValid(text):\n return bool(re.search(r'\\b(start|stop) (look|watch|guard)ing\\b', text, re.IGNORECASE))", "def check_eth_address_format(address):\n if len(address) != 42 or address[:2] != '0x':\n return False\n\n for ch in address[2:]:\n if ch not in \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890\":\n return False\n\n return True", "def is_valid_number(self, text, widget):\n if len(text) > 2:\n return False\n for char in text:\n if not char.isdigit():\n return False\n if text != '' and int(text) == 0:\n return False\n return True", "def test_send(self):\n self.inverter.send(b\"\\x00\\x01\\x02\", b\"\")\n received_message = self.sock.recv(4096)\n self.assertEqual(message, received_message)", "def test_text(self):\n server, client = loopback()\n with pytest.warns(DeprecationWarning) as w:\n count = server.send(b\"xy\".decode(\"ascii\"))\n assert \"{0} for buf is no longer accepted, use bytes\".format(\n WARNING_TYPE_EXPECTED\n ) == str(w[-1].message)\n assert count == 2\n assert client.recv(2) == b\"xy\"", "def 
test_delete_sms_message(self):\n pass", "def test_is_valid(self, address):\n self.test_string(address)\n self.test_alnum(address)" ]
[ "0.7446568", "0.6175791", "0.59026414", "0.5805947", "0.56742376", "0.5664919", "0.55345494", "0.5473586", "0.5392989", "0.5327824", "0.53224766", "0.53210604", "0.5278678", "0.52711344", "0.5263743", "0.52219886", "0.52177036", "0.51783454", "0.5167334", "0.51671714", "0.516648", "0.51610374", "0.5158195", "0.51408297", "0.5066006", "0.50655586", "0.50389147", "0.5034926", "0.5024629", "0.501616", "0.5009411", "0.49801442", "0.49795407", "0.49764717", "0.49627027", "0.49587", "0.49571982", "0.4955371", "0.49496433", "0.49277437", "0.49248573", "0.49248537", "0.49192035", "0.48835936", "0.48716798", "0.48673958", "0.48559502", "0.48538628", "0.4843497", "0.48422807", "0.48420215", "0.48261657", "0.48145398", "0.48134667", "0.48035395", "0.48004436", "0.47926444", "0.47852302", "0.47814712", "0.4778878", "0.47723034", "0.476614", "0.47650236", "0.475633", "0.47486627", "0.47443882", "0.47375044", "0.4737334", "0.4735941", "0.4735604", "0.47219956", "0.4703893", "0.4703352", "0.46958503", "0.46918857", "0.46892956", "0.46803275", "0.46777248", "0.4676085", "0.4671178", "0.46701753", "0.4667181", "0.46643552", "0.46525392", "0.4644889", "0.46448007", "0.46414247", "0.46389174", "0.46349132", "0.4632991", "0.4623262", "0.4623262", "0.4618969", "0.46147576", "0.46106845", "0.46086505", "0.46086195", "0.4605939", "0.4605215", "0.46027282" ]
0.6486989
1
Checks that the GsmModem in PDU mode does not send the message if an error occurs, when the text is within ASCII chars 22-126.
def testSendSmsPduModeError(self): # setup expectation to raise a non-timeout error with prompt when(self.mockDevice).read_lines().thenRaise(Exception("something other than timeout")) self.gsm.send_sms("1234", "Test Message") # must see command with size verify(self.mockDevice, times=1).write("AT+CMGS=21\r") # must see command to break out of command prompt verify(self.mockDevice, times=1).write("\x1b") # must NOT see command with text and terminating char verify(self.mockDevice, times=0).write("00110004A821430000AA0CD4F29C0E6A96E7F3F0B90C\x1a") # allow any number of reads verify(self.mockDevice, atleast=1).read_lines() verifyNoMoreInteractions(self.mockDevice)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_sms_valid(text=''):\n try:\n text.decode('ascii')\n except:\n return False\n if len(text) > 160:\n return False\n\n return True", "def testSendSmsPduMode(self):\n \n # setup expectation to raise a timeout error with prompt\n err = errors.GsmReadTimeoutError(\">\")\n when(self.mockDevice).read_lines().thenRaise(err).thenReturn(self.oklines)\n self.gsm.send_sms(\"1234\", \"Test Message\")\n \n # must see command with size\n verify(self.mockDevice, times=1).write(\"AT+CMGS=21\\r\")\n # must see command with text and terminating char\n verify(self.mockDevice, times=1).write(\"00110004A821430000AA0CD4F29C0E6A96E7F3F0B90C\\x1a\")\n # allow any number of reads\n verify(self.mockDevice, atleast=1).read_lines()\n verifyNoMoreInteractions(self.mockDevice)", "def message_check(self, message):\n if(message == \"\"):\n return False\n\n if(len(message) > 256):\n return False\n\n return True", "def _validate_ascii(message):\n return all(ord(c) < 128 for c in message)", "def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True", "def check_message(self, msg):\n pass", "def func_ehlo(self, data):\n data_list = bytes(data).decode().encode('ascii', 'ignore').decode().split(' ')\n if data_list[0].lower().rstrip() == 'ehlo':\n message = '250-' + self.hostname + '\\r\\n250-PIPELINING\\r\\n' \\\n + '250-8BITMIME\\r\\n250-SIZE ' \\\n + str(self.data_recv_size) \\\n + '\\r\\n' + '250 AUTH LOGIN PLAIN'\n self.func_sender(message)\n return True", "def is_valid_msg(msg):\n for char in msg:\n if char not in string.ascii_letters and char not in string.punctuation and char != ' ':\n return False\n return True", "def send_sms_via_modem(self, mobile, text=\"\"):\n\n mobile = self.sanitise_phone(mobile)\n\n # Add '+' before country code\n mobile = \"+\" + mobile\n\n try:\n self.modem.send_sms(mobile, text)\n return True\n except:\n return False", "def testInvalidCommand(self):\n self.mgr.sendGoProCommand(140, (1, 0, 0, 0))\n self.assertFalse(self.v.message_factory.gopro_set_request_encode.called)", "def message(self, text):\n lines = str(text).split('\\n') # Split at newline(s)\n for i, line in enumerate(lines): # For each substring...\n if i > 0: # If newline(s),\n self.write_lcd(self.LCD_DATA_E1, 0xC0) # set DDRAM address to 2nd line\n self.write_lcd(self.LCD_DATA_E1, line, True) # Issue substring", "def astral(msg):\r\n return any(ord(c) > 0xFFFF for c in msg)", "def find_message(text: str) -> str:\n if not all(ch in string.printable for ch in text):\n return\n elif not text:\n return \"\"\n if len(text) > 1000:\n return\n decode_message = \"\"\n # check case sensitive for a simple character in string\n for correct_char in [char for char in text\\\n if char.upper() == char and char.lower()\\\n in 'abcdefghijklmnopqrstuwxyz']:\n decode_message += correct_char\n return decode_message", "def send_text(msg, up):\n try:\n client = TwilioRestClient(account=TWILIO_ACCOUNT_SID,\n token=TWILIO_AUTH_TOKEN)\n c = client.sms.messages.create(to=up.phone,\n from_=WATTTIME_PHONE,\n body=msg.msg)\n TwilioSMSEvent(user=up.user,\n msg_type=msg.msg_type,\n to_number=up.phone,\n from_number=WATTTIME_PHONE,\n body=msg.msg).save()\n\n debug(\"texted '{}' to {}\".format(msg, str(up.name)))\n return True\n except:\n print (\"Faild message\", up.phone, WATTTIME_PHONE, 
msg.msg)\n debug(\"failed to text '{}' to {}\".format(msg, str(up.name)))\n return False", "def _handle_ok_ack(string):\n if string.strip() == Parser.OK_MSG:\n return True\n return False", "def _text(self, fromwhom, number, text):\n\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(self._smsaddress, self._smspw)\n server.sendmail(str(fromwhom), '{}@vtext.com'.format(number),\n str(text))\n server.quit()", "def verify_text(self, text):\n pass", "def degsm(self):\n self.success = False", "def send_message(self, msg):\n if msg is not None:\n try:\n self.node.write(msg.encode(encoding='UTF-8'))\n time.sleep(self.delay)\n except serial.serialutil.SerialTimeoutException:\n self.handle_congestion()\n self.send_message(msg)\n except serial.SerialException:\n self.handle_disconnection()\n self.send_message(msg)\n except:\n print(\"\\n!!!Unexpected error occurred in send_message()!!!\\n\")\n finally:\n return False\n return True", "def test_recipient_not_str_error(\n config,\n):\n sms = YesssSMS.YesssSMS(\"0000000000\", \"2d4faa0ea6f55813\")\n with pytest.raises(ValueError):\n sms.send(176264916361239, \"test\")", "def check_ascii_compliance(plaintext: bytes) -> bool:\n return all(c < 128 for c in plaintext)", "def exit_with_message(error_text: str) -> NoReturn:\n raise PealSpeedParseError(peal_speed, error_text)", "def test_unsupported_chars_error(\n config,\n):\n with requests_mock.Mocker() as m:\n sms = YesssSMS.YesssSMS(\"0000000000\", \"2d4faa0ea6f55813\")\n m.register_uri(\n \"POST\",\n # pylint: disable=protected-access\n sms._login_url,\n status_code=302,\n # pylint: disable=protected-access\n headers={\"location\": sms._kontomanager},\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n sms._kontomanager,\n status_code=200,\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n sms._sms_form_url,\n status_code=200,\n text=TEST_FORM_TOKEN_SAMPLE,\n )\n m.register_uri(\n \"POST\",\n # pylint: disable=protected-access\n sms._send_sms_url,\n status_code=200,\n text=_UNSUPPORTED_CHARS_STRING,\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n sms._logout_url,\n status_code=200,\n )\n with pytest.raises(sms.UnsupportedCharsError):\n sms.send(YESSS_TO, \"test\")", "def isvalidport(txt):\n return txt.isdigit() and int(txt) <= 65535 and int(txt) >= 0", "def isValid(self) :\n try :\n pos = 0\n while self.firstblock[pos] == chr(0) :\n pos += 1\n except IndexError : \n return False\n else : \n firstblock = self.firstblock[pos:]\n if firstblock.startswith(\"\\033E\\033\") or \\\n firstblock.startswith(\"\\033%1BBPIN;\") or \\\n ((pos == 11000) and firstblock.startswith(\"\\033\")) or \\\n (firstblock.startswith(\"\\033*rbC\") and (not self.lastblock[-3:] == \"\\f\\033@\")) or \\\n firstblock.startswith(\"\\033*rB\\033\") or \\\n firstblock.startswith(\"\\033%8\\033\") or \\\n (firstblock.find(\"\\033%-12345X\") != -1) or \\\n (firstblock.find(\"@PJL ENTER LANGUAGE=PCL\\012\\015\\033\") != -1) or \\\n (firstblock.startswith(chr(0xcd)+chr(0xca)) and (firstblock.find(\"\\033E\\033\") != -1)) :\n return True\n else : \n return False", "def sendErrorMessage(msg): #@NoSelf", "def simulate_unsupported_chars_error(valid_connection):\n path = \"YesssSMS.api.YesssSMS.send\"\n with mock.patch(path, side_effect=YesssSMS.YesssSMS.UnsupportedCharsError()):\n yield", "def parseable(message_data):\n if not message_data:\n raise TypeError('message_data must not be None')\n\n acceptable = range(97, 123) + range(65, 91) + range(48, 
58) + range(33, 43) + range(44, 48) + [58, 63, 64, 94]\n return any(ord(c) not in acceptable for c in message_data['message'].replace(' ', ''))", "def check_ack_or_nak(message):\n value = message.body[-1]\n\n if value == 0x06:\n return\n elif value == 0x15:\n raise CommandFailure(command_code=message.command_code)\n else:\n raise RuntimeError(\"Unexpected ACK/NAK value (0x%02x)\" % value)", "def validate_ping(result):\n if '0 packets received' in str(result) or 'no answer from' in str(result) or '0 received' in str(result):\n print 'Conectividade - DOWN'\n return False\n print 'Conectividade - OK'\n return True", "def send_error(self, conn, msg, srcif):\n message = {}\n message[SRCE], message[DEST] = ('.').join(srcif.split('.', 3)[:3]) + '.1', msg[SRCE]\n message[TYPE] = NRTE\n message[MESG] = {}\n sending_msg = json.dumps(message).encode()\n conn.sendall(sending_msg)\n return True", "def can_recept(self, text, *args, **kwargs):\n # such slot always can recept (when message is not empty) because it consumes the message\n if text:\n return True\n else:\n return False", "def _check_error(self, ipi):\n\n ipi_error = ipi.communicate(timeout=120)[1].decode(\"ascii\")\n assert \"\" == ipi_error, \"IPI ERROR OCCURED: {}\".format(ipi_error)", "def func_empty_check(self, data):\n check = bytes(data).decode().encode('ascii', 'ignore').decode().lower().rstrip()\n if str(check) == '':\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('command not found'))\n return True", "def handleSystemMessage(self,data):\n message = data.split()[0]\n if message == \"QAError\":\n message,detID = data.split()\n pcaId = self.detectorMapping[detId]\n partition = self.PCAs[pcaId]\n data = {\n \"id\": \"DCS\",\n \"detectorId\": detId,\n \"message\": message,\n }\n self.sendQAMessage(data,partition)\n else:\n super().handleSystemMessage(message)", "def validate_message(self, message):\n\n for char in message:\n if ord(char) < 65 or ord(char) > 90:\n raise ValueError('Invalid message. 
Enigma Machine only supports messages composed of uppercase letters')", "def text_cell_phone(self, sender, message):\n if self.cell_phone:\n text_message.send_sms(sender, message, self.cell_phone)", "def message(self, text):\n\n if( rpi_device ):\n self.clear()\n for char in text:\n if char == '\\n' or char == '^':\n self.cmd(0xC0) # new line\n else:\n self.cmd(ord(char),True)", "def test_empty_message(config, valid_connection):\n sms = YesssSMS.YesssSMS(LOGIN, YESSS_PASSWD)\n with pytest.raises(ValueError):\n sms.send(YESSS_TO, \"\")\n with pytest.raises(sms.EmptyMessageError):\n sms.send(YESSS_TO, \"\")", "def validaTexto(text,entrada): \n\tvalido=True\n\tif (entrada == \"rut\"):\n\t\tcadena = \"0123456789kK\"\n\tif (entrada == \"num\"):\n\t\tcadena = \"0123456789\"\n\tif (entrada == \"sinSimbolos\"):\n\t\tcadena = \" ,.-abcdefghijklmnñopqrstuvwxyzáéíóúABCDEFGHIJKLMNOPQRSTUVWXYZÁÉÍÓÚ0123456789\"\n\tif (entrada == \"texto\"):\n\t\tcadena = \" abcdefghijklmnñopqrstuvwxyzáéíóúABCDEFGHIJKLMNOPQRSTUVWXYZÁÉÍÓÚ\"\n\tif (entrada == \"rut_login\"):\n\t\tcadena = \"0123456789kK-\"\n\ti=0\n\tstring_num=str(text.encode('utf-8'))\n\tif(len(string_num)==0):\n\t\tvalido=False\n\twhile(valido and (i<len(string_num))):\n\t\tif (not string_num[i] in cadena):\n\t\t\tvalido=False\n\t\ti=i+1\n\treturn valido", "def check_message(m, n_frames, tx_id, data):\n assert len(m.frames) == n_frames\n assert m.tx_id == tx_id\n assert m.data == bytearray(data)", "def validate_encryption(self) -> bool:\n # Receive the first encrypted message from server\n message = self.receive()\n if message != Message.HI:\n print(\"Encryption error! Closing this socket...\")\n return False\n # Send the first encrypted message\n self.send(Message.HI)\n # Receive the encrypted OK message\n message = self.receive()\n if message == Message.OK:\n print(\"Encryption is established.\")\n return True\n else:\n print(\"Encryption error! 
Closing this socket...\")\n return False", "def valid_message_length(self):\n if self.message_len() > 0:\n if self.message_len() <= self.max_msg_length:\n return True\n return False", "def _has_non_ascii_characters(data_string):\r\n try:\r\n data_string.encode('ascii')\r\n except UnicodeEncodeError:\r\n return True\r\n\r\n return False", "def errorCheck(self):\n\t\twhile 1:\n #check for bad state\n\t\t\tif epics.caget(self.error_bypass) == 1:\n\t\t\t\tout_msg=\"Bypass flag is TRUE\"\n elif epics.caget(self.error_bcs) != 1:\n out_msg=\"BCS tripped\"\n elif epics.caget(self.error_mps) != 0:\n out_msg=\"MPS tripped\"\n elif epics.caget(self.error_gaurdian) != 0:\n out_msg=\"Gaurdian tripped\"\n\t\t\n #elif epics.caget(self.error_und_tmit) < 5.0e7:\n # out_msg=\"UND Tmit Low\"\n else:\n out_msg='Everything Okay'\n\n #exit if the stop button is set\n #if not self.mi.getter.caget(\"SIOC:SYS0:ML03:AO702\"):\n\t\t\tif not epics.caget(\"SIOC:SYS0:ML03:AO702\"):\n break\n\n #set the error check message\n epics.caput (\"SIOC:SYS0:ML00:CA000\",out_msg)\n print out_msg\n\n #break out if error check is bypassed\n if (out_msg==\"Bypass flag is TRUE\"):\n break\n\n #break out if everything is okay\n if (out_msg==\"Everything Okay\"):\n epics.caput(self.error_tripped,0)\n break\n\t\t\t\t#return\n else:\n epics.caput(self.error_tripped,1)\n time.sleep(0.1)", "def send(self, message):\n if self.ser.isOpen():\n self.ser.write(serial.to_bytes(message) + vp21.NULL)\n bytes = self.ser.read(BUF)\n if bytes == vp21.NULL:\n return CMD_SUCCESS\n else:\n return CMD_FAILURE\n log.warning('Command \\\"%s\\\" rejected.')\n else:\n return CMD_FAILURE", "def test_text(self):\n server, client = loopback()\n with pytest.warns(DeprecationWarning) as w:\n server.sendall(b\"x\".decode(\"ascii\"))\n assert \"{0} for buf is no longer accepted, use bytes\".format(\n WARNING_TYPE_EXPECTED\n ) == str(w[-1].message)\n assert client.recv(1) == b\"x\"", "def send_message(self,input_message):\n try: \n self.connection.send('\\r' + input_message + '\\r')\n\n except:\n sys.stderr.write('failed to send message to server \\n') \n return False\n\n return True", "def send_msg(self, text):\n\n if self.__webex_flag__ == 1:\n self.__send_msg_by_webex__(text)\n\n if self.__webex_flag__ == 1:\n self.__send_msg_by_mail__(text)\n\n return", "def test_get_sms_message(self):\n pass", "def validate_message(self, state_id, msg):\n pass", "def func_helo(self, data):\n data_list = bytes(data).decode().encode('ascii', 'ignore').decode().split(' ')\n if data_list[0].lower().rstrip() == 'helo':\n message = '250 {}'.format(self.hostname)\n self.func_sender(message)\n return True", "def test_text(self):\n server, client = loopback()\n with pytest.warns(DeprecationWarning) as w:\n count = server.send(b\"xy\".decode(\"ascii\"))\n assert \"{0} for buf is no longer accepted, use bytes\".format(\n WARNING_TYPE_EXPECTED\n ) == str(w[-1].message)\n assert count == 2\n assert client.recv(2) == b\"xy\"", "def test_dccSendIndecipherablePort(self):\n result = self.assertRaises(\n irc.IRCBadMessage,\n self.client.dcc_SEND,\n self.user,\n self.channel,\n \"foo.txt 127.0.0.1 sd@d\",\n )\n self.assertEqual(str(result), \"Indecipherable port 'sd@d'\")", "def is_valid_raw(command): \n # default state\n valid = True\n \n # split the command into sections\n data_list = command[:-1].split(' ')\n \n # check the command's validity\n if (len(data_list) < 3) or ((data_list[0] != '<READ') and \\\n (data_list[0] != '<WRITE')):\n # if the command is too long and doesn't start corectly 
then it is \n # invalid \n valid = False\n \n elif (len(data_list[1]) != 5) or not data_list[1].startswith('0x'):\n # if the address field is not the right length and doesnt start \n # wit the hexidecimal identifier then it is invalid\n valid = False\n \n elif (data_list[1][4] != ',') or not is_hex(data_list[1][2:-1]):\n # if the address doean't end with a comma or the number portion is \n # not a hexideciaml number then it is invalid\n valid = False\n \n elif ('WRITE' in data_list[0]) and \\\n any([not is_hex(item) for item in data_list[2:]]):\n # if it is a write command and any item in the data list is not\n # hexidecimal then it is invalid\n valid = False\n \n elif ('READ' in data_list[0]) and \\\n (len(data_list) != 3 or not data_list[2].isdigit()):\n # if it is a read command and there in not a single decimal length\n # specified then the command is invalid\n valid = False \n \n # end if\n \n # print errors associated with commands if required\n if ('READ' in command) and not valid:\n print '*** Invalid READ command, please refer to the'\\\n 'Read me for proper syntax ***' \n \n elif ('WRITE' in command) and not valid:\n print '*** Invalid WRITE command, please refer to the'\\\n 'Read me for proper syntax ***' \n # end if\n \n return valid", "def message(self, text):\n for char in text:\n if char == '\\n':\n self.cmd(0xC0) # next line\n else:\n self.cmd(ord(char),True)", "def message(self, text):\n for char in text:\n if char == '\\n':\n self.cmd(0xC0) # next line\n else:\n self.cmd(ord(char),True)", "def message(self, text):\n for char in text:\n if char == '\\n':\n self.cmd(0xC0) # next line\n else:\n self.cmd(ord(char),True)", "def isOK(ser):\n while 1:\n msg=ser.readline(300)\n if msg.find(\"<\")!=-1:\n break\n if msg.find(\"<OK\")!=-1:\n return True\n return False", "def handle_privmsg(self, ievent):\n\n if ievent.txt and ievent.txt[0] == '\\001':\n self.handle_ctcp(ievent)\n return 1", "def _check_packet_corruption(self, header):\n data_corrupt = False\n if header.msg_type == 0 or header.msg_size == 0 or header.msg_size > 10000:\n if not self._file_corrupt and self._debug:\n print('File corruption detected')\n data_corrupt = True\n self._file_corrupt = True\n\n return data_corrupt", "def send(self, serial_cmnd):\n self.sio.write(serial_cmnd+\"\\n\") # TextIOWrapper object converts the newline character to \"\\r\\n\", this is required by the device \n self.sio.flush() # it is buffering. 
required to get the data out *now*\n response = self.sio.readline()\n response = response.rstrip() # Trim the newline character\n if (response == \"ok\"):\n return True\n else:\n logging.debug(\"Board response:\" + response) \n return response", "def test_send(self):\n self.inverter.send(b\"\\x00\\x01\\x02\", b\"\")\n received_message = self.sock.recv(4096)\n self.assertEqual(message, received_message)", "def sms():\n def send_sms(number, message):\n #get session bus\n try:\n session_bus = dbus.SessionBus()\n except dbus.exceptions.DBusException:\n click.echo(chalk.red('Have a display you must'))\n return\n\n #check for kdeconnect\n try:\n devices_dbus_obj = session_bus.get_object('org.kde.kdeconnect','/modules/kdeconnect/devices')\n except dbus.exceptions.DBusException:\n click.echo(chalk.red('kdeconnect not installed it appears'))\n return\n\n #get devices ids\n devices_xml = devices_dbus_obj.Introspect(dbus_interface='org.freedesktop.DBus.Introspectable')\n devices_xml = ET.fromstring(devices_xml)\n nodes = devices_xml.findall('node')\n if(len(nodes) is 0):\n click.echo(chalk.red('Devices there are not'))\n return\n deviceIDs = list()\n for node in nodes:\n deviceIDs.append(node.get('name'))\n\n #get devices properties\n deviceID_Props = dict()\n for ID in deviceIDs:\n try:\n device = session_bus.get_object('org.kde.kdeconnect', '/modules/kdeconnect/devices/' + ID)\n deviceProps = device.GetAll('', dbus_interface='org.freedesktop.DBus.Properties')\n deviceID_Props[ID] = deviceProps\n except dbus.exceptions.DBusException:\n #don't create an entry in the dictionary if the object, or a GetAll method does not exist\n pass\n if(len(deviceID_Props) is 0):\n click.echo(chalk.red('Devices there are not'))\n return\n\n #eliminate non sms devices\n devices_no_sms = list()\n for device in deviceID_Props:\n keeping = False\n for plugin in deviceID_Props[device]['supportedPlugins']:\n if('sms' in plugin):\n keeping = True\n if(not keeping):\n devices_no_sms.append(device)\n for device in devices_no_sms:\n del deviceID_Props[device]\n\n #if there are no devices that support sms\n if(len(deviceID_Props) is 0):\n click.echo(chalk.red('Devices that support sms there are not'))\n return\n #elif only one device was found that supports sms\n elif(len(deviceID_Props) is 1):\n click.echo(chalk.yellow('Device using: ' + str(list(deviceID_Props.values())[0]['name'])))\n sendMessage = session_bus.get_object('org.kde.kdeconnect', '/modules/kdeconnect/devices/' + str(list(deviceID_Props.keys())[0]) + '/sms')\n sendMessage.sendSms(number, message, dbus_interface='org.kde.kdeconnect.device.sms')\n return\n #otherwise get user to choose device\n else:\n choice_map = dict()\n for idx, device in enumerate(deviceID_Props, start=1):\n click.echo(chalk.green(str(idx) + ': ' + deviceID_Props[device]['name']))\n choice_map[str(idx)] = device\n choice = click.prompt(chalk.blue('Device, you must select: '), default='1', type=click.Choice(choice_map.keys()))\n #click.echo('you chose: ' + choice_map[the_chosen_device] + ' with id: ' + deviceNames_IDs[choice_map[the_chosen_device]])\n sendMessage = session_bus.get_object('org.kde.kdeconnect', '/modules/kdeconnect/devices/' + choice_map[choice] + '/sms')\n sendMessage.sendSms(number, message, dbus_interface='org.kde.kdeconnect.device.sms')\n return\n\n click.echo(chalk.blue('For whom you want to send an sms'))\n friend_name = input().strip()\n friend_name_lower = friend_name.lower()\n if os.path.isfile(PEOPLE_CONFIG_FILE_PATH):\n with open(PEOPLE_CONFIG_FILE_PATH) as fin:\n 
contents = yaml.load(fin)\n entries = contents['entries']\n for entry in entries:\n if(friend_name == entry['name'] or friend_name_lower == entry['name']):\n number = entry['mobile']\n break\n if('number' not in locals()):\n click.echo(chalk.red('Friend not found.'))\n else:\n if(len(number) is not 0):\n click.echo(chalk.blue('Message, you must enter: '))\n message = input(':')\n click.echo(chalk.yellow('Device to send sms to ' + number + ' looking for: '))\n send_sms(number, message)\n else:\n click.echo(chalk.red('Friends number not in people file, run `yoda people setup` to add it.'))\n else:\n click.echo(chalk.red('The People file does not exist, run `yoda people setup` to create an entry.'))", "def pes_packet_check_formedness(payload):\n b1 = ord(payload[0])\n b2 = ord(payload[1])\n b3 = ord(payload[2])\n\n b4 = ord(payload[3])\n if b1 != 0 or b2 != 0 or b3 != 1:\n return False\n return True", "def __send_and_receive(self, cmnd, timeout=None):\n\n if not self.is_connected():\n printf(\"Communication| Tried to send a command while robot was not connected!\")\n return \"\"\n\n # Prepare and send the command to the robot\n self.__gen_serial_id()\n cmnd = \"#{} {}\".format(self.serial_id,cmnd)\n printf(\"Coummunication | Send Message: {}, total length: {}\".format(cmnd,len(cmnd)), type=DEBUG)\n if PY3:\n cmndString = bytes(cmnd + \"\\n\", encoding='ascii')\n else:\n cmndString = bytes(cmnd + \"\\n\")\n\n try:\n self.__serial.write(cmndString)\n\n except serial.serialutil.SerialException as e:\n # printf(\"while sending command {}. Disconnecting Serial! \\nError: {}\".format(cmndString, str(e)),type=ERROR)\n self.__isConnected = False\n return \"\"\n\n try:\n if PY3:\n response = str(self.__serial.readline(),encoding='ascii')\n else:\n response = self.__serial.readline()\n if response.startswith(\"${}\".format(self.serial_id)):\n if \"E20\" in response or \"E21\" in response:\n printf(\"Communication| ERROR: send {}, received error from robot: {}\".format(cmndString, response), type=ERROR)\n return \"\"\n response = response.replace('\\n', '')\n response = response.replace('${} '.format(self.serial_id),'')\n printf(\"Communication| [{}] {}{}\".format(cmnd, \" \" * (30 - len(cmnd)), response), type=DEBUG)\n else:\n printf(\"Communication| ERROR: send {}, received error from robot: {}\".format(cmndString, response), type=ERROR)\n # printf(\"Communication| ERROR: received error from robot: {}\".format(response),type=ERROR)\n return \"\"\n return response.lower()\n except serial.serialutil.SerialException as e:\n printf(\"while sending command {}. Disconnecting Serial! 
\\nError: {}\".format(cmnd,str(e)), type=ERROR)\n self.__isConnected = False\n return \"\"", "def _check(self,err):\r\n if err < 0:\r\n buf_size = 128\r\n buf = create_string_buffer('\\000' * buf_size)\r\n self.nidaq.DAQmxGetErrorString(err,byref(buf),buf_size)\r\n raise RuntimeError('NI-DAQ call failed with error %d: %s'%(err,repr(buf.value)))", "def parseCommand(self, msg):\n if msg == \"\":\n return\n if self.interpreter.debug:\n print \"Modem::parseCommand: \", msg\n if(self.status == Modem.Status.KILL):\n return\n command = msg.split(Interpreter.SEPARATOR)\n if (len(command)==1):\n if (command[0] == 'OK'):\n return self.confirmedMyIstr()\n elif (len(command)==2):\n if (command[0] == 'error'):\n return self.error(int(command[1]))\n elif (len(command)==3):\n if (command[0] == 'send_file'):\n cmd2 = re.sub(\"[^0-9]\", \"\", command[2])\n return self.recvDataFile(command[1],int(cmd2),False)\n elif (command[0] == 'send_stream'):\n cmd2 = re.sub(\"[^0-9]\", \"\", command[2])\n return self.recvDataFile(command[1],int(cmd2),False)\n return self.reset_myself()", "def TestSendRecvMessageTimeoutRaisesCommsError(self):\n self.txrx.timeout = 0.2 # short timeout so we don't hold up testing too much\n\n byte_array_message = bytes(\"\\x01\\x01\\x01\\x01\\x01\\x01\", encoding=DATA_ENCODING)\n txmsg = TxMessage(byte_array_message, num_response_msg=1, expect_eom=True)\n with self.assertRaises(PercivalCommsError):\n self.txrx.send_recv_message(txmsg)\n\n # Receive the bytes from our test socket\n msg = self.connection.recv(6)\n # Verify the bytes are the same as those sent\n self.assertEqual(msg, byte_array_message)", "def check(self, text):\n lt = s = n = 0\n result = False\n for g in text:\n if g in LETTERS and lt < self.letters:\n lt += 1\n if g in NUMBERS and n < self.numbers:\n n += 1\n if g in SYMBOLS and s < self.symbols:\n s += 1\n if n == self.numbers and s == self.symbols and lt == self.letters:\n result = True\n break\n return result", "def barcode_is_valid(s):\n return (bool(re.match(r'^[ATGC]*$',s))\n or barcode_is_10xgenomics(s))", "def _check_has_message(data):\r\n return re.match(r'^:[a-zA-Z0-9_]+\\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+'\r\n r'\\.tmi\\.twitch\\.tv '\r\n r'PRIVMSG #[a-zA-Z0-9_]+ :.+$', data)", "def isValid(text):\n return bool(re.search(r'\\b((kill|stop) the (alarm|clock|music))\\b', text, re.IGNORECASE))", "def test_sendMessageInvalidCommand(self):\n error = self.assertRaises(\n ValueError, self.p.sendMessage, \" \", \"param1\", \"param2\"\n )\n self.assertEqual(\n str(error),\n \"Somebody screwed up, 'cuz this doesn't look like a command to \" \"me: \",\n )", "def process_dead_command(self):\n command_terminal = adapter_serializers.CommandTerminal(data=self.command)\n if not command_terminal.is_valid():\n logger.error('Receive an invaid data : {}'.format(command_terminal.format_errors()))\n raise natrix_exceptions.TriggerBugException(\n message=u'command is invalid: {}'.format(command_terminal.format_errors())\n )\n\n if not command_terminal.process():\n # TODO:\n logger.error('failed')\n else:\n logger.info('success')", "def _validate_text(self, text):\n if text is None:\n return\n if not (0 < len(text) <= self.TEXT_MAX):\n raise ValidationError", "def test_message_string():\n result = True\n\n message = msg.Message()\n size = 0\n for i in range(num_it):\n message.appendString(str(i) + \"azertyuiopqsdfghjklmwxcvbn\")\n size += len(str(i) + \"azertyuiopqsdfghjklmwxcvbn\")\n if message.length != msg.HEADER_SIZE + (i+1)*msg.intStruct.size + size:\n print(\"Size is \", 
message.length, \" but should be \", msg.HEADER_SIZE + (i+1)*msg.intStruct.size + size)\n print(\"Error : message.appendString\")\n result = False\n\n message.resetCursor()\n for i in range(num_it):\n r = message.readString()\n if r != str(i) + \"azertyuiopqsdfghjklmwxcvbn\":\n print(r, \" vs \", str(i) + \"azertyuiopqsdfghjklmwxcvbn\")\n print(\"Error : message.read/appendString\")\n result = False\n\n return result", "def exit_with_message(error_text: str) -> NoReturn:\n raise StartRowParseError(start_row, error_text)", "def test_dccSendIndecipherableAddress(self):\n result = self.assertRaises(\n irc.IRCBadMessage,\n self.client.dcc_SEND,\n self.user,\n self.channel,\n \"foo.txt #23 sd@d\",\n )\n self.assertEqual(str(result), \"Indecipherable address '#23'\")", "def _checkMode(mode):\n\n if not isinstance(mode, str):\n raise TypeError('The {0} should be a string. Given: {1!r}'.format(\"mode\", mode))\n\n if mode not in [MODE_RTU, MODE_ASCII]:\n raise ValueError(\"Unreconized Modbus mode given. Must be 'rtu' or 'ascii' but {0!r} was given.\".format(mode))", "def test_missingCommand(self):\n self.server.parse_command(b\"001\")\n\n self.assertEqual(self.transport.value(),\n b\"001 BAD Missing command\\r\\n\")\n\n self.server.connectionLost(\n failure.Failure(error.ConnectionDone(\"Done\")),\n )", "def handleSystemMessage(self,data):\n message = data.split()[0]\n if message == \"detectorError\":\n #handle dcs detector error\n message,detId = data.split()\n pcaId = self.detectorMapping[detId]\n partition = self.PCAs[pcaId]\n data = {\n \"id\": \"DCS\",\n \"detectorId\": detId,\n \"message\": message,\n }\n self.sendDCSMessage(data,partition)\n else:\n super().handleSystemMessage(message)", "def ignores_bad_seqno():\n test_str = \"cs144--cs144--cs144--cs144--cs144--cs144--cs144--cs144\\n\"\n bad_seqno_str = DEBUG_BAD_SEQNO + \"cs144cs144cs144cs144cs144cs144cs144cs144\\n\"\n server = start_server()\n client = start_client(reference=True)\n\n # Send full segment.\n write_to(client, test_str)\n time.sleep(TEST_TIMEOUT)\n if read_from(server, num_lines=1) != test_str:\n return False\n segments = read_segments_from(server)\n first_segment = segments[0] if len(segments) > 0 else None\n\n # Write the bad segment. 
Nothing should be read from the server and no\n # ACKs should be sent.\n write_to(client, bad_seqno_str)\n time.sleep(TEST_TIMEOUT)\n if read_from(server, num_lines=1) == bad_seqno_str:\n return False\n\n # Make sure no ACKs are sent to the bad segment, or if an ACK is sent,\n # it is a duplicate ACK to a previous segment.\n segments = read_segments_from(server)\n if not segments:\n return False\n for segment in segments:\n if \"ACK\" in segment.flags and segment.source_port == CLIENT_PORT and \\\n (first_segment is None or segment.ackno != first_segment.ackno):\n return False\n\n return True", "def _msim_message_test(\n self,\n ad_mo,\n ad_mt,\n mo_sub_id,\n mt_sub_id, msg=\"SMS\",\n max_wait_time=MAX_WAIT_TIME_SMS_RECEIVE,\n expected_result=True):\n\n if msg == \"SMS\":\n for length in self.message_lengths:\n message_array = [rand_ascii_str(length)]\n if not sms_send_receive_verify_for_subscription(\n self.log,\n ad_mo,\n ad_mt,\n mo_sub_id,\n mt_sub_id,\n message_array,\n max_wait_time):\n ad_mo.log.warning(\n \"%s of length %s test failed\", msg, length)\n return False\n else:\n ad_mo.log.info(\n \"%s of length %s test succeeded\", msg, length)\n self.log.info(\"%s test of length %s characters succeeded.\",\n msg, self.message_lengths)\n\n elif msg == \"MMS\":\n for length in self.message_lengths:\n message_array = [(\"Test Message\", rand_ascii_str(length), None)]\n\n if not mms_send_receive_verify(\n self.log,\n ad_mo,\n ad_mt,\n message_array,\n max_wait_time,\n expected_result):\n self.log.warning(\"%s of body length %s test failed\",\n msg, length)\n return False\n else:\n self.log.info(\n \"%s of body length %s test succeeded\", msg, length)\n self.log.info(\"%s test of body lengths %s succeeded\",\n msg, self.message_lengths)\n return True", "def test_send(self):\n msg_flag = self.instance.send(self.msg_short)\n assert(msg_flag)\n msg_flag, msg_recv = self.driver.recv(self.timeout)\n assert(msg_flag)\n nt.assert_equal(msg_recv, self.msg_short)", "def test_170411_jabber_error(self):\n spc = parser(get_file('PFWF38_empty.txt'))\n j = spc.get_jabbers('')\n self.assertEquals(j[0][0],\n (\"The Storm Prediction Center issues Day 3-8 Fire \"\n \"Weather Outlook at Apr 11, 19:54z \"\n \"http://www.spc.noaa.gov/products/fire_wx/\"\n \"2017/20170413.html\"))", "def _check_command(self, resp, prompt):\n for line in resp.split(NEWLINE):\n if line.startswith('?'):\n raise InstrumentProtocolException('error processing command (%r)', resp[1:])\n if line.startswith('*'): # response\n if not valid_response(line):\n raise InstrumentProtocolException('checksum failed (%r)', line)", "def send_error(msg):\n\n print(msg)", "def send_command(self, data):\n try:\n self.write(data)\n reply = self.read_line()\n \n if reply == \"{}\":\n pass\n else:\n print \"send_command: received bad reply %s\" % (reply)\n sys.exit(1)\n except Exception:\n raise", "def isSpamSMS(textLine):\n\treturn re.sub(\"[\\^w]\", \" \", textLine).split()[0].lower() == \"spam\"", "def callback_message(self, conn, mess):\n if (mess.getFrom().getStripped() == config.BOT_IDENTITY[\"username\"]) or (get_sender_username(mess) == config.CHATROOM_FN):\n logging.debug(\"Ignore a message from myself\")\n return False\n\n message = \"\"\n for character in TopGun.CHARACTERS:\n if mess.getBody().find(\"(%s)\" % character) != -1:\n message += \"(%s) %s \" % (character, self.topgun.get_random(character))\n if message:\n self.send(mess.getFrom(), message, message_type=mess.getType())", "def error(self,text):\n if type(text) in (bytes, str):\n T = 
text\n else:\n # list probably:\n T = '\\n'.join(text)\n print(('-'*60))\n print(T)\n print(('='*60))\n return T", "def generate_message(self, mtu):\r\n raise SystemExit(self.sm.__end_msg__)", "def test_sendMessage(self):\n self.p.sendMessage(\"CMD\", \"param1\", \"param2\")\n self.check(\"CMD param1 param2\\r\\n\")", "def test_dccSendMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_SEND, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC SEND request: ['foo']\")", "def test_unsupportedCommand(self):\n self.server.lineReceived(b\"001 HULLABALOO\")\n self.assertEqual(self.transport.value(),\n b\"001 BAD Unsupported command\\r\\n\")", "def TestSendRecvMessageSocketRaisesCommsError(self):\n # close the dummy socket\n self.s.close()\n self.txrx.timeout = 0.2 # short timeout so we don't hold up testing too much\n byte_array_message = bytes(\"\\x09\\x08\\x07\\x06\\x05\\x04\", encoding=DATA_ENCODING)\n txmsg = TxMessage(byte_array_message, num_response_msg=1, expect_eom=True)\n with self.assertRaises(PercivalCommsError):\n self.txrx.send_recv_message(txmsg)", "def _force_drop(self, msg) -> bool:\n return self.ALWAYSDROP_TEXT in msg", "def validacion(texto):\n bandera = False\n for caracter in texto:\n if caracter != '0' and caracter != '1':\n bandera = True\n if bandera == True:\n print(\"El texto ingresado no es binario\")\n return bandera", "def test_20_phonenumbers_UnicodeDecodeError(self):\n number_phone = self.samples[2]\n with self.assertRaises(osv.except_osv):\n self.pn._symbol_set_char(number_phone)" ]
[ "0.68313634", "0.67669386", "0.61789453", "0.60250825", "0.579996", "0.57878447", "0.5629621", "0.5615489", "0.55831856", "0.5521425", "0.54795057", "0.5427085", "0.54028183", "0.53781205", "0.5342483", "0.53372145", "0.533206", "0.5329173", "0.5322715", "0.53093356", "0.52896047", "0.52851856", "0.5264613", "0.52627045", "0.52588075", "0.5254264", "0.52520216", "0.52446926", "0.5242512", "0.52265406", "0.521335", "0.51882344", "0.5188132", "0.5173926", "0.51716626", "0.5147659", "0.5136588", "0.5124349", "0.5115969", "0.5112753", "0.51097095", "0.5107577", "0.5105859", "0.5104152", "0.5097906", "0.5085417", "0.5084768", "0.5072784", "0.50713557", "0.50613385", "0.505693", "0.50462484", "0.5040456", "0.5026494", "0.50234103", "0.50162363", "0.50162363", "0.50162363", "0.5016114", "0.5004503", "0.49964744", "0.49860892", "0.49833512", "0.49760565", "0.49750358", "0.4959897", "0.49594387", "0.49581075", "0.49578685", "0.49525374", "0.49424815", "0.49420202", "0.49418557", "0.4939509", "0.49305516", "0.49274", "0.4926123", "0.49233118", "0.4917237", "0.49152774", "0.4913048", "0.49129426", "0.49095526", "0.49079183", "0.4903781", "0.49014363", "0.49005476", "0.48996878", "0.48933253", "0.4892266", "0.48911464", "0.48900697", "0.48887613", "0.48881963", "0.48865286", "0.4884665", "0.48840636", "0.4877761", "0.48766768", "0.4876446" ]
0.71887165
0
Returns True if the content type is valid.
def is_valid_content_type(cls, content_type: str) -> bool: return content_type in cls.CONTENT_TYPES.value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_valid_ct(content_type: str) -> bool:\n content_type = content_type.strip()\n return _is_valid_regex(CT_CONTENT_TYPE_REGEX_PATTERN, content_type)", "def _is_valid_content_type_format(content_type: str) -> bool:\n return (\n _is_valid_ct(content_type)\n or _is_valid_pt(content_type)\n or _is_valid_set(content_type)\n or _is_valid_list(content_type)\n or _is_valid_dict(content_type)\n or _is_valid_union(content_type)\n or _is_valid_optional(content_type)\n )", "def is_schema_types_valid(self):\n valid_types = {\"string\", \"int\", \"float\", \"datetime\", \"boolean\"}\n invalid_types = []\n if self.schema_content:\n for dataset in self.schema_content:\n attributes = self.schema_content.get(dataset)\n for attr in attributes.values():\n type_to_validate = attr.get(\"type\")\n if type_to_validate not in valid_types:\n invalid_types.append(type_to_validate)\n\n if invalid_types:\n error_message, error_code = Errors.modeling_rule_schema_types_invalid(\n invalid_types\n )\n if self.handle_error(\n error_message, error_code, file_path=self.file_path\n ):\n self._is_valid = False\n return False\n return True", "def is_valid(self):\n try:\n self.validate()\n return True\n except (TypeError, ValueError) as e:\n return False", "def _is_valid_pt(content_type: str) -> bool:\n content_type = content_type.strip()\n return content_type in SPECIFICATION_PRIMITIVE_TYPES", "def is_readable(self, content_type):\n return False", "def _validate_content_type(\n content_type: str, content_name: str, performative: str\n) -> Tuple[bool, str]:\n if not _is_valid_content_type_format(content_type):\n return (\n False,\n \"Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.\".format(\n content_name,\n performative,\n ),\n )\n\n return (\n True,\n \"Type of content '{}' of performative '{}' is valid.\".format(\n content_name, performative\n ),\n )", "def is_valid(self):\n\n return True", "def _is_compositional_type(content_type: str) -> bool:\n for valid_compositional_type in (\n SPECIFICATION_COMPOSITIONAL_TYPES + PYTHON_COMPOSITIONAL_TYPES\n ):\n if content_type.startswith(valid_compositional_type):\n return True\n return False", "def is_valid(self):\n return _drafter.check_blueprint(self.content)", "def valid(self) -> bool:\n return True", "def _is_valid_set(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:set\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 1:\n return False\n\n sub_type = sub_types[0]\n return _is_valid_pt(sub_type)", "def is_valid(self):\n return (self.time is not None\n and self.author is not None\n and self.content is not None)", "def is_content_malformed(self):\n return self._tag == 'content_malformed'", "def is_content_malformed(self):\n return self._tag == 'content_malformed'", "def type_valid(self):\n return contain_in_list_equal(self._type_or_ref, PARAM_RES_TYPES)", "def is_valid(self): # -> bool:\n ...", "def is_valid_type(self, question_type):\n\t\treturn question_type in self.valid_types", "def _is_valid(self, value):\n\n # Entities have an istypeof method that can perform more sophisticated\n # type checking.\n if hasattr(self._type, \"istypeof\"):\n return self._type.istypeof(value)\n else:\n return isinstance(value, self._type)", "def check_eligible_mimetype(self, ctype, uid):\n 
self.helper.log_debug(\n 'check_eligible_mimtype: checking content-type %s of msg uid %s' %\n (ctype, uid))\n if ctype == \"application/zip\":\n return True\n elif ctype == \"application/gzip\":\n return True\n elif ctype == \"application/x-gzip\":\n return True\n elif ctype == \"application/octet-stream\":\n # Non-standard mimetype used by Amazon SES dmarc reports\n return True\n elif ctype == \"application-x-gzip\":\n # Non-standard mimetype used by Comcast dmarc reports\n return True\n elif ctype == \"application/x-zip-compressed\":\n # Non-standard mimetype used by Yahoo dmarc reports\n return True\n elif ctype == \"application/xml\":\n return True\n elif ctype == \"text/xml\":\n return True\n else:\n self.helper.log_debug(\n 'check_eligible_mimtype: skipping content-type %s of msg uid %s' %\n (ctype, uid))\n return False", "def valid(self) -> bool:\n pass", "def verify_type(self, obj):\n return isinstance(obj, self.type_)", "def validate(self):\n self._check_type()", "def is_valid_type(self, attr: Optional[str] = None) -> bool:\n try:\n self.validate_type(attr)\n except TypeError:\n return False\n return True", "def validate(self,value):\r\n return type(value) is self.datatype", "def _is_valid_dict(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:dict\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 2:\n return False\n\n sub_type_1 = sub_types[0]\n sub_type_2 = sub_types[1]\n return _is_valid_pt(sub_type_1) and _is_valid_pt(sub_type_2)", "def validate_content_type(uri: str) -> None:\n try:\n response = requests.head(uri)\n response.raise_for_status()\n except RequestException as e:\n raise ValidationError(f\"groundtruth content type ({uri}) validation failed\") from e\n\n content_type = response.headers.get(\"Content-Type\", \"\")\n if content_type not in SUPPORTED_CONTENT_TYPES:\n raise ValidationError(f\"groundtruth entry has unsupported type {content_type}\")", "def IsValid(self):\n return False", "def check_content_type():\n return request.content_type == \"application/json\"", "def valid_xss_content_type(http_res):\n # When no content-type is returned, browsers try to display the HTML\n if \"content-type\" not in http_res.headers:\n return True\n\n # else only text/html will allow javascript (maybe text/plain will work for IE...)\n if \"text/html\" in http_res.headers[\"content-type\"]:\n return True\n return False", "def check_type(self):\n return True", "def IsValid(self):\n return len(self.Text) > 0", "def is_valid(self) -> bool:\n from redun.scheduler import get_current_scheduler\n\n if self.type_name != self.__handle__.class_name:\n # Handle class_name might be out of date from deserialization.\n return False\n\n scheduler = get_current_scheduler()\n assert scheduler\n return scheduler.backend.is_valid_handle(self)", "def is_image(self):\r\n # we can only get this if we have headers\r\n LOG.debug('content type')\r\n LOG.debug(self.content_type)\r\n if (self.content_type is not None and\r\n self.content_type.lower() in IMAGE_TYPES.values()):\r\n return True\r\n else:\r\n return False", "def is_valid(self) -> bool:\n return self.errors == \"\"", "def is_valid(self) -> bool:\r\n try:\r\n self.shape\r\n return True\r\n except ValueError:\r\n return False", "def check_type(content):\n return (isinstance(content, Elem) or type(content) == Text or\n 
(type(content) == list and all([type(elem) == Text or\n isinstance(elem, Elem)\n for elem in content])))", "def is_valid(self):\n return self._is_valid", "def isvalid(self):\n validName = not StringExtension.is_none_or_white_space(self._name)\n validValue = not StringExtension.is_none_or_white_space(self._value)\n if validName and validValue:\n return True\n return False", "def is_valid(path):\n with open(path, 'rb') as handle:\n size = os.fstat(handle.fileno()).st_size\n try:\n mgz.header.parse_stream(handle)\n mgz.body.meta.parse_stream(handle)\n while handle.tell() < size:\n mgz.body.operation.parse_stream(handle)\n print('valid')\n return True\n except ConstructError:\n print('invalid')\n return False", "def _file_can_be_compressed(filename):\n content_type = ''\n with open(filename, 'rb') as f:\n content_type = _get_content_type(f)\n return content_type in TEXT_TYPES", "def test_validate(self):\n content_id = 1\n\n for ct in self.CONTENT_TYPES:\n self.assertEquals(ct, self.TCT.validate_content_type(ct))\n content = self.TCT.validate_content_type_and_id(ct, content_id)\n self.assertTrue(content)\n content_id += 1", "def is_valid(self):\n return self._valid", "def valid_type(self, data, errors):\n\t\terrors.append(\"{}: valid_type() must be implemented for SchemaBase derived classes.\".format(self.__class__.__name__))\n\t\treturn False", "def validate_format(self):\n return all(\n [\n self.validate_header_keyword(),\n self.validate_type_keyword(),\n self.validate_type_annotations(),\n self.validate_unique_header(),\n self.validate_against_header_count(),\n ]\n )", "def has_mixed_content(self) -> bool:\n raise NotImplementedError()", "def valid_media_type(media_type):\n return media_type in ACCEPTED_MEDIA_TYPES", "async def is_valid_page_content(self, page_content: bytes) -> bool:\n try:\n image = Image.open(BytesIO(page_content))\n return True\n except UnidentifiedImageError:\n return False", "def is_valid(self):\n self.clean()\n return not bool(self.errors)", "def is_html(self):\r\n return self.__content_type == html_ctype", "def is_good_response(self, resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1)", "def _is_valid_list(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:list\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 1:\n return False\n\n sub_type = sub_types[0]\n return _is_valid_pt(sub_type)", "def isValidForSchema(schema):\n\n return True", "def is_valid_file(self, file_path):\n return True", "def is_html(self):\n return self.__content_type == html_ctype", "def is_valid(self, attribute: Attribute) -> bool:\n return self.get_data_type() == attribute.type", "def is_valid(self):\n return not self.errors", "def is_valid(self) -> bool:\n return len(self.validate()) == 0", "def is_good_response(self, resp):\r\n\t\tcontent_type = resp.headers['Content-Type'].lower()\r\n\t\treturn (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1)", "def is_valid(self):\n self.logger.debug(\"In is_valid.\")\n\n document = self._get_raw_doc()\n\n session = iHMPSession.get_session()\n self.logger.info(\"Got iHMP session.\")\n\n (valid, _error_message) = session.get_osdf().validate_node(document)\n\n if 'associated_with' 
not in self._links.keys():\n valid = False\n\n self.logger.debug(\"Valid? %s\", str(valid))\n\n return valid", "def is_valid_type(type):\n return type in type_to_adapter", "def is_expected_content_type(\n response_content_type: str, expected_content_type: str\n) -> bool:\n if expected_content_type == \"application/json\":\n return json_re.match(response_content_type) is not None\n return expected_content_type in response_content_type", "def _is_valid_optional(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:optional\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 1:\n return False\n\n sub_type = sub_types[0]\n return (\n _is_valid_ct(sub_type)\n or _is_valid_pt(sub_type)\n or _is_valid_set(sub_type)\n or _is_valid_list(sub_type)\n or _is_valid_dict(sub_type)\n or _is_valid_union(sub_type)\n )", "def is_valid(self):\n self.logger.debug(\"In is_valid.\")\n\n document = self._get_raw_doc()\n\n session = iHMPSession.get_session()\n self.logger.info(\"Got iHMP session.\")\n\n # _error_message is intentionally unused\n (valid, _error_message) = session.get_osdf().validate_node(document)\n\n if 'prepared_from' not in self._links.keys():\n self.logger.error(\"Must have a 'prepared_from' linkage.\")\n valid = False\n\n self.logger.debug(\"Valid? %s\", str(valid))\n\n return valid", "def isValid(self):\n return self._valid", "def is_valid(self, value) -> 'True|str':\n if self.base_type is not None and not isinstance(value, self.base_type):\n return f'Value {value} is not type of {self.base_type}.'\n return True", "def is_valid(self):\n raise NotImplementedError", "def is_valid(self):\n if self.flow_id is None:\n return False\n elif self.protocol is None:\n return False\n elif self.dst_addr is None:\n return False\n elif self.dst_port is None:\n return False\n elif self.pattern is None:\n return False\n else:\n return True", "def _is_valid(self):\n self._is_allows_valid()\n self._is_denies_valid()", "def isValid(self):\n return self.valid", "def is_valid(self):\n self.errors = {}\n self._process_data()\n self._validate_changes()\n return not self.errors", "def is_valid_license_type(self):\n clean = self.license_type.lower().replace('-', ' ')\n return clean not in INVALID_LICENSE_TYPE", "def __bool__(self):\n return self.isValid()", "def is_valid(self):\n # Check blocks\n for block in self.blocks.values():\n # Non-optional blocks must be enabled\n if (\n block.structure.number_non_optional_data() > 0\n and not block.enabled\n and block.is_allowed()\n ):\n self.last_error = (\n f'Required block \"{block.block_header.name}\" not enabled'\n )\n return False\n # Enabled blocks must be valid\n if block.enabled and not block.is_valid:\n self.last_error = f'Invalid block \"{block.block_header.name}\"'\n return False\n\n return True", "def is_valid(self):\n # Check if required fields are filled.\n if self.post_url == \"\" or self.post_url_hash == \"\" or self.title == \"\":\n return False\n\n # Check if the hash of the URL matches the hash field.\n if self.post_url_hash != url_to_hashkey(self.post_url):\n return False\n\n return True", "def isValid(self):\n return _libsbml.XMLError_isValid(self)", "def _is_valid(self, *args, **kwargs):\n fn = args[0]\n if not fn.endswith(\".h5\"):\n return False\n try:\n with h5py.File(fn, \"r\") as f:\n if \"arbor_type\" not in f.attrs:\n 
return False\n if f.attrs[\"arbor_type\"].astype(str) != \"ArborArbor\":\n return False\n except BaseException:\n return False\n return True", "def _is_valid(self, url: ParseResult):\n\n if (\n re.match('(.*).' + self.netloc, url.netloc) is None or\n re.match('(.*)\\+[0-9]*$', url.path) is not None or\n re.match('(.*)javascript:(.*)', url.path) is not None\n ):\n return False\n\n return True", "def is_file_type_error(self):\n return self._tag == 'file_type_error'", "def isValidCTag(value):\n try:\n value = zlib.decompress(value)\n except zlib.error:\n return False\n try:\n WebDAVDocument.fromString(value)\n return True\n except ValueError:\n return False", "def _check(self, token_type):\n if self._is_at_end():\n return False\n\n return self._peek().token_type == token_type", "def _valid_input_type(self, input_type):\n # pylint: disable=W0613, R0201\n return True", "def is_valid_geometry(self, value: List) -> bool:\n\n def check_geom(geom):\n if isinstance(geom, (Point, MultiPoint)):\n out = 'point' in self._permitted_geometries\n if not out:\n logger.error('Not allowed point type geometry components')\n return out\n elif isinstance(geom, (LineString, MultiLineString)):\n out = 'line' in self._permitted_geometries\n if not out:\n logger.error('Not allowed line type geometry components')\n return out\n elif isinstance(geom, (Polygon, MultiPolygon)):\n out = 'polygon' in self._permitted_geometries\n if not out:\n logger.error('Not allowed polygon type geometry components')\n return out\n elif isinstance(geom, GeometryCollection):\n out = True\n for entry in geom.geometries:\n out &= check_geom(entry)\n return out\n else:\n raise TypeError('Got unexpected geometry type `{}`'.format(type(geom)))\n\n if self._permitted_geometries is None or value is None:\n return True\n\n if isinstance(value, str):\n return value.lower().strip() in self._permitted_geometries\n if not isinstance(value, Geometry):\n raise TypeError('Got unexpected geometry type `{}`'.format(type(value)))\n return check_geom(value)", "def isValid(self):\n return self.file_name != \"\" and self.line_number != 0", "def typeValidator(self, food_type):\n if type(food_type) != str:\n API.abort(400, error_messages[16]['int_type'])\n\n # check if the contents of title have characters between a-z and A-Z\n elif not re.match(r\"(^[a-zA-Z_ ]+$)\", food_type) or food_type.isspace():\n API.abort(\n 400, error_messages[17]['wrong_format_ty'])\n\n return True", "def is_valid(self):\r\n raise NotImplementedError", "def _is_valid_type(_type: Type[Any]) -> bool:\n\n if _type in _TYPE_MAP:\n return True\n\n if not inspect.isclass(_type):\n return False\n\n return issubclass(_type, Table)", "def has_acceptable_type(self, value):\n if not value:\n return False\n if super().has_acceptable_type(value):\n return True\n # Hmmm ok maybe we're running under IPython:\n try:\n import IPython\n return isinstance(value, IPython.kernel.zmq.iostream.OutStream)\n except ImportError:\n return False", "def has_complex_content(self) -> bool:\n raise NotImplementedError()", "def is_valid(self) -> bool:\n return all(\n (\n not self.author,\n self.unit,\n )\n )", "def __datatype_check(self, record_attribute, attribute_schema):\n if 'INT' in attribute_schema[TYPE_KEY].upper():\n if record_attribute.isdigit():\n return True\n elif attribute_schema[TYPE_KEY].upper() in DECIMAL_TYPES:\n if record_attribute.isdecimal():\n return True\n elif 'CHAR' in attribute_schema[TYPE_KEY].upper() \\\n or 'TEXT' in attribute_schema[TYPE_KEY].upper():\n if type(record_attribute) is 
str:\n return True\n else:\n IS_VALID_FILE = False\n return False", "def _is_message_valid(message):\n return isinstance(message, ev_envelope.Envelope)", "def valid_for(obj):\n\n if not obj.filedata:\n return False\n\n #hexstring = \"cffaedfe07000001030000800200\"\n return True", "def _verify_content(state_content_list):\n CONTENT_ITEM_SCHEMA = [\n ('type', basestring), ('value', basestring)]\n ALLOWED_CONTENT_TYPES = ['text', 'image', 'video']\n\n for content_item in state_content_list:\n utils.verify_dict_keys_and_types(content_item, CONTENT_ITEM_SCHEMA)\n if content_item['type'] not in ALLOWED_CONTENT_TYPES:\n raise Exception('Unsupported content type %s.' %\n content_item['type'])", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n file = request.files['file']\n if not file:\n self.file.errors.append('Could not find your file.')\n return False\n\n if not allowed_file(file.filename):\n self.file.errors.append('Only excel files can be uploaded (only .xls or .xlsx )')\n return False\n\n return True", "def is_content_supported(self, cbas):\n\n # 1\n if self.accept_all_content:\n return SupportInfo(True)\n\n # 2\n if len(self.supported_content.filter(content_binding=cbas.content_binding, subtype=None)) > 0:\n return SupportInfo(True)\n\n # 2a (e.g., subtype = None so #3 would end up being the same check as #2)\n if not cbas.subtype: # No further checking can be done\n return SupportInfo(False)\n\n # 3\n if len(self.supported_content.filter(content_binding=cbas.content_binding, subtype=cbas.subtype)) > 0:\n return SupportInfo(True)\n\n # 4\n return SupportInfo(False)", "def __bool__(self):\n return self.is_valid", "def _should_send_binary(self) -> bool:\n if not self.binary_support:\n return False\n\n content_type = self._get_content_type()\n if not content_type.startswith(self.non_binary_content_type_prefixes):\n return True\n\n content_encoding = self._get_content_encoding()\n # Content type is non-binary but the content encoding might be.\n return \"gzip\" in content_encoding.lower()", "def isValid(self):\n ret = libxml2mod.xmlParserGetIsValid(self._o)\n return ret", "def is_supported_context_type(content_type: str):\n return (\n content_type == 'application/pdf'\n or content_type == 'text/csv'\n or content_type == CONTENT_TYPE_ZIP\n or content_type == 'application/octet-stream'\n )" ]
[ "0.7517771", "0.7417243", "0.70675975", "0.6998834", "0.68573606", "0.6832993", "0.67981493", "0.6714344", "0.6592863", "0.6578096", "0.6575198", "0.6568513", "0.6541694", "0.65321887", "0.65321887", "0.6491889", "0.6491021", "0.6470095", "0.6450657", "0.63938487", "0.63921374", "0.63828534", "0.63796335", "0.6365822", "0.63636667", "0.63577056", "0.6356282", "0.63446987", "0.6342855", "0.63200486", "0.631802", "0.62756664", "0.62695897", "0.6263529", "0.6262805", "0.62618774", "0.62542164", "0.62503755", "0.62466794", "0.6215823", "0.62099576", "0.6209446", "0.61904323", "0.61898404", "0.6183354", "0.61832935", "0.6174065", "0.6169665", "0.61601126", "0.61437786", "0.61328524", "0.6122743", "0.61168987", "0.6097153", "0.6079923", "0.60762477", "0.6069927", "0.60668516", "0.6056729", "0.6053437", "0.6043884", "0.6037401", "0.60326314", "0.6024633", "0.6021913", "0.6021446", "0.6008705", "0.59987646", "0.59961146", "0.59847224", "0.598406", "0.59761167", "0.59664494", "0.59653586", "0.59536356", "0.5939822", "0.5938509", "0.5930252", "0.5892745", "0.5892365", "0.58899194", "0.58857036", "0.5877371", "0.5873882", "0.58687425", "0.58655953", "0.5863261", "0.58567506", "0.5848401", "0.5841331", "0.58297664", "0.58295316", "0.58243155", "0.5823126", "0.58214206", "0.58204204", "0.5819084", "0.58133626", "0.58099407", "0.5808638" ]
0.8173388
0
Constructor for facebook sdk
def init_fb(self, **kwargs): try: self.graph = facebook.GraphAPI(access_token=fb_token, version='2.4') except Exception as e: sys.exit(str(e))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, access_token, endpoint='/me',\r\n version='2.5'):\r\n self.access_token = access_token\r\n self.endpoint = endpoint", "def initialize_facebook():\n session = FacebookSession(APP_ID, APP_SECRET, ACCESS_TOKEN)\n return FacebookAdsApi(session)", "def __init__(self, access_token):\n self.access_token = access_token", "def facebook(self, facebook):\n\n self._facebook = facebook", "def __init__(__self__, *,\n app_id: pulumi.Input[str],\n is_enabled: pulumi.Input[bool],\n app_secret: Optional[pulumi.Input[str]] = None,\n pages: Optional[pulumi.Input[Sequence[pulumi.Input['FacebookPageArgs']]]] = None):\n pulumi.set(__self__, \"app_id\", app_id)\n pulumi.set(__self__, \"is_enabled\", is_enabled)\n if app_secret is not None:\n pulumi.set(__self__, \"app_secret\", app_secret)\n if pages is not None:\n pulumi.set(__self__, \"pages\", pages)", "def __init__(self, access_token=None):\n self.access_token = access_token", "def __init__(self, user_id, token):\n\n self.user_id = user_id\n self.buttons = {}\n self.token = token\n self.quick_reply_uri = \"https://graph.facebook.com/v10.0/me/messages?access_token=\"+self.token\n self.url_button_uri = \"https://graph.facebook.com/v2.6/me/messages?access_token=\"+self.token\n self.text_uri = 'https://graph.facebook.com/v9.0/me/messages?access_token='+self.token\n self.template_uri = 'https://graph.facebook.com/v9.0/me/messages?access_token='+self.token\n self.button_template_uri = 'https://graph.facebook.com/v2.6/me/messages?access_token='+self.token\n self.typing_on_uri = 'https://graph.facebook.com/v2.6/me/messages?access_token='+self.token\n self.mark_seen_uri = 'https://graph.facebook.com/v2.6/me/messages?access_token='+self.token", "def __init__(self, oauth_consumer_token=None, oauth_access_token=None):\n self.consumer_token = oauth_consumer_token\n self.access_token = oauth_access_token", "def __init__(self, access_token):\n self._access_token = access_token", "def __init__(self, callback_url):\n # Credientials\n self.URI_SCHEME = \"https\"\n self.API_ENDPOINT = \"rightsignature.com\"\n self.REQUEST_TOKEN_URL = \"/oauth/request_token\"\n self.ACCESS_TOKEN_URL = \"/oauth/access_token\"\n self.REDIRECT_URL = \"/oauth/authorize\"\n self.version = \"1.0\"\n self.signature_method = \"HMAC-SHA1\" # as I said\n self.BASE_URL = \"%s://%s\" % (self.URI_SCHEME, self.API_ENDPOINT)\n\n self.API_KEY = \"\"\n self.API_SECRET = \"\"\n self.CALLBACK_URL = callback_url\n self.request_token = None # that comes later\n self.access_token = None # that comes later and later\n\n self.request_token_secret = None\n self.access_token_secret = None\n\n self.verifier = None\n self.error = None\n\n self.request_oauth_nonce = None\n self.request_oauth_timestamp = None\n self.access_oauth_nonce = None\n self.access_oauth_timestamp = None\n self.request_oauth_error = None\n self.access_oauth_error = None", "def __init__(self, client_id, token, scope=[\"activity\", \"heartrate\", \"location\", \"nutrition\", \"profile\", \"settings\", \"sleep\", \"social\", \"weight\"]):\n\n\t\tif token['access_token'] == \"\":\n\t\t\t# We need to fetch a token for the user.\n\t\t\tprint(\"Note: looks like we don't have an access token yet. 
Let's fetch one.\")\n\n\t\t\tself.client = MobileApplicationClient(client_id)\n\t\t\tself.fitbit = OAuth2Session(client_id, client=self.client, scope=scope)\n\n\t\t\tauthorization_base_url = \"https://www.fitbit.com/oauth2/authorize\"\n\n\t\t\tauthorization_url, state = self.fitbit.authorization_url(authorization_base_url)\n\n\t\t\tprint(\"Please go to the following authorization URL: {}\".format(authorization_url))\n\n\t\t\traw_callback_url = input(\"Paste callback URL you get back here: \")\n\n\t\t\tself.fitbit.token_from_fragment(raw_callback_url)\n\t\t\tself.token = self.fitbit.token['access_token']\n\n\t\t\tprint(self.fitbit.token)\n\n\t\telse:\n\t\t\t# We've got an access token, and we'll use it.\n\t\t\tself.client = MobileApplicationClient(client_id)\n\t\t\tself.fitbit = OAuth2Session(client_id, client=self.client, scope=scope, token=token)\n\t\t\tself.token = token['access_token']", "def __init__(self, client=\"ANDROID_EMBED\"):\n self.context = self._DEFAULT_CLIENTS[client][\"context\"]\n self.api_key = self._DEFAULT_CLIENTS[client][\"api_key\"]", "def __init__(self, oauth=None, client_id=None):\n\t\tself.oauth = oauth\n\t\tself.client_id = client_id or self.default_client_id", "def __init__(self,\n client_id,\n client_secret):\n self.__client_id = client_id\n self.__client_secret = client_secret", "def facebook(self):\n try:\n from facebook import Facebook\n except ImportError:\n log.warning(\"PyFacebook is not installed!\")\n else:\n if self.user and self.user.profile.uses_facebook_connect:\n # This implies, that the correct cookies must be set. We don't\n # double check for that.\n api_key = get_app().cfg['facebook/api_key']\n secret_key = get_app().cfg['facebook/secret_key']\n facebook = Facebook(api_key, secret_key)\n # Setting the cookie values\n # It's so cool to have no private attributes. 
(;\n facebook.uid = self.session['fb_user_id']\n facebook.session_key = self.session['fb_session_id']\n return facebook", "def __init__(self, consumer_key, consumer_secret, access_token,\n access_token_secret, **kwargs):\n self.consumer_key = consumer_key\n self.consumer_secret = consumer_secret\n self.access_token = access_token\n self.access_token_secret = access_token_secret\n super().__init__(**kwargs)", "def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object", "def __init__(self, access_token, db_path, id_list):\n self.access_token = access_token\n self.db_path = db_path\n self.id_list = id_list\n\n g = facebook.GraphAPI(self.access_token, version='2.3')\n self.g = g\n\n # connect to database\n con = lite.connect(self.db_path)\n self.con = con\n\n with con:\n # create cursor to the database\n cur = con.cursor()\n self.cur = cur\n # create tables for posts, comments, post likes and people if not exists\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS Posts(post_id TEXT PRIMARY KEY, status_id TEXT, content TEXT, \"\n \"person_hash_id TEXT, published_date TEXT, last_comment_date TEXT, post_type TEXT, status_type TEXT, \"\n \"post_link TEXT, link TEXT, video_link TEXT, picture_link TEXT, link_name TEXT, link_caption TEXT, \"\n \"link_description TEXT, comment_count INTEGER, share_count INTEGER, like_count INTEGER, \"\n \"love_count INTEGER, wow_count INTEGER, haha_count INTEGER, sad_count INTEGER, angry_count INTEGER, \"\n \"mentions_count INTEGER, mentions TEXT, location TEXT, date_inserted TEXT)\")\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS Comments(comment_id TEXT PRIMARY KEY, person_hash_id TEXT, post_id TEXT, \"\n \"comment_content TEXT, comment_date TEXT, like_count INTEGER)\")\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS Post_likes(like_id TEXT PRIMARY KEY, person_hash_id TEXT, post_id TEXT)\")\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS People(person_hash_id TEXT PRIMARY KEY, person_id TEXT, person_name TEXT)\")", "def __init__(self, access_key, secret_key, **kwargs):\r\n pass", "def __init__(self):\n self.dsq = fb_forecast_q.FbForecastApi(\"gfs\")", "def __init__(self, options={}):\n # Throw an error if app_id is not present in options dict\n if 'app_id' not in options:\n raise KeyError('app_id must be supplied when making requests to the API. 
Get a free app_id by signing up here: https://www.opengraph.io/')\n\n self.app_id = options['app_id']\n\n # Assign options if present, or defaults if not\n # These can be overridden when making requests through get_site_info\n self.cache_ok = options['cache_ok'] if 'cache_ok' in options else True\n self.full_render = options['full_render'] if 'full_render' in options else False\n self.version = options['version'] if 'version' in options else '1.1'", "def __init__(self, username, passwordresettoken, passwordresetexpires, password, email, phone, facebook, google, linkin, group_id):\n\n self.username = username\n self.passwordresettoken = passwordresettoken\n self.passwordresetexpires = passwordresetexpires\n self.password = password\n self.email = email\n self.phone = phone\n self.facebook = facebook\n self.google = google\n self.linkin = linkin\n self.createdate = datetime.now()\n self.updatedate = datetime.now()\n self.group_id = group_id", "def __init__(self, client_id=None, access_token=None):\r\n if not client_id and not access_token:\r\n raise TypeError('__init__() must be passed at least one '\r\n 'of client_id, access_token')\r\n\r\n self.apiroot = 'https://api.instagram.com/v1'\r\n\r\n self.client_id = client_id\r\n self.access_token = access_token\r\n self.add_filter(self.add_authorization)", "def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret):\n self.api = self.getAPI(consumer_key, consumer_secret, access_token, access_token_secret)", "def __init__(self, client_id: str, client_secret: str, access_token_publish_url: str, access_token: str = None):\n\n self.client_id = client_id\n self.client_secret = client_secret\n self.access_token_publish_url = access_token_publish_url\n self.api_base_url = 'https://api.ce-cotoha.com/api/dev/'\n\n if access_token is not None:\n self.access_token = access_token\n else:\n self.access_token = self.update_access_token()", "def __init__(self,\n access_token=None,\n token_type=None,\n error=None):\n\n # Initialize members of the class\n self.access_token = access_token\n self.token_type = token_type\n self.error = error", "def __init__(self, client_id, client_secret):\n self.client_id = client_id\n self.client_secret = client_secret\n self.token = None\n self.request_time = None\n self._initialized = False", "def __init__(self, client_auth_type, client_id, client_secret=None):\n self.client_auth_type = client_auth_type\n self.client_id = client_id\n self.client_secret = client_secret", "def __init__(self, consumer_key,\n consumer_secret,\n request_token_url,\n access_token_url,\n authorize_url,\n callback_url='oob',\n version='1.0',\n token=None):\n self.__consumer_key = consumer_key\n self.__signature_method = 'HMAC-SHA1'\n self.__version = version\n self.__consumer_secret = consumer_secret\n self.__signing_key = None\n self.__signature_base_string = None\n self.__parameter_string = None\n self.__auth_headers = None\n self.__token = token\n self.__signature = None\n self.__access_token_url = access_token_url\n self.__request_token_url = request_token_url\n self.__authorize_url = authorize_url\n self.__callback_url = callback_url\n self.__response = None\n self.__request = None", "def __init__(self):\n self.dsq = fb_stations_q.FbStationsApi(\"ghcn\")", "def __init__(self, client_id=None, client_secret=None):\n self.client_id = client_id\n self.client_secret = client_secret\n self.access_token = None\n self.refresh_token = None\n self.token_expiration_time = None", "def __init__(self):\n auth = 
tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n self.api = tweepy.API(auth)", "def __init__(self, profile: AskarProfile):\n self._profile = profile", "def __init__(self):\n self.api = Api(consumer_key=credentials[\"consumer_key\"],\n consumer_secret=credentials[\"consumer_secret\"],\n access_token_key=credentials[\"access_token_key\"],\n access_token_secret=credentials[\"access_token_secret\"])", "def __init__(self, auth, base_url=ANACODE_API_URL):\n self.auth = auth\n self.base_url = base_url", "def __init__(self, client_id, client_secret):\r\n self.client_id = client_id\r\n self.client_secret = client_secret\r\n\r\n self.add_filter(self.set_header)", "def __init__(self, adapter, config, createToken=None):\r\n self._adapter = adapter\r\n self.oauthToken = None\r\n\r\n #make sure their request implementation matches our adapter\r\n if not hasattr(adapter, \"getRequest\"):\r\n raise TypeError(\"Your http request implementation is missing the getRequest method\")\r\n if not hasattr(adapter, \"postRequest\"):\r\n raise TypeError(\"Your http request implementation is missing the postRequest method\")\r\n if not hasattr(adapter, \"deleteRequest\"):\r\n raise TypeError(\"Your http request implementation is missing the deleteRequest method\")\r\n if not hasattr(adapter, \"putRequest\"):\r\n raise TypeError(\"Your http request implementation is missing the putRequest method\")\r\n\r\n self._config = config\r\n self._oauth = OAuth(config, self._adapter)\r\n\r\n if createToken is not None:\r\n self.createAccessToken = createToken\r\n else:\r\n self.createAccessToken = self.createAccessTokenReplacement()", "def __init__(self, name, age, gender):\n\n self._name = name\n self._age = age\n self._gender = gender\n self._friend = None", "def __init__(self, url, token):\n super().__init__(url, token)", "def __init__(self):\n\n # TODO: Add login and data grab logic", "def __init__(self, bot: DreamBot) -> None:\n\n self.bot = bot", "def __init__(self, bot: DreamBot) -> None:\n\n self.bot = bot", "def __init__(self, response_type, client_id, redirect_uri, scope, state):\n self.response_type = response_type\n self.client_id = client_id\n self.redirect_uri = redirect_uri\n self.scope = scope\n self.state = state", "def __init__(self):\n self.authurl = Config().auth\n self.baseurl = Config().api\n self.s = Session()\n self.s.headers = {'Accept': 'application/json'}\n data = {\"grant_type\": \"client_credentials\", \"scope\": \"/read-public\", \"client_id\": Config().client_id,\n \"client_secret\": Config().client_secret}\n r = self.s.request(method=\"post\", url=self.authurl, data=data)\n self.s.headers = {'Accept': 'application/json', \"Access token\": r.json()[\"access_token\"]}", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self,consumer_key,consumer_secret):\n\t\tself.CONSUMER_KEY = consumer_key\n\t\tself.CONSUMER_SECRET = consumer_secret\n\t\tself.token=None\n\t\tself.secret=None\n\t\tself.resource_owner_key=None\n\t\tself.resource_owner_secret=None", "def __init__(self, access_token=None, access_token_secret=None, consumer_key=None, consumer_secret=None, header_auth=None):\r\n if access_token is not None:\r\n self.token = Token(access_token, access_token_secret)\r\n else:\r\n self.token = None\r\n\r\n if consumer_key is None and consumer_secret is None:\r\n consumer_key = self.consumer_key\r\n consumer_secret = self.consumer_secret\r\n\r\n if header_auth is not 
None:\r\n self.header_auth = header_auth\r\n\r\n self.consumer = Consumer(consumer_key, consumer_secret)", "def setUp(self):\n super(FacebookSearchTestCase, self).setUp()\n # Create an app access token to create a test user\n # to create a GraphAPI object with a valid user access token.\n app_token = facebook.GraphAPI().get_app_access_token(\n self.app_id, self.secret, True\n )\n self.create_test_users(self.app_id, facebook.GraphAPI(app_token), 1)\n user = self.test_users[0]\n self.graph = facebook.GraphAPI(user[\"access_token\"])", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, conv):\n self.conv = conv\n auth = self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n self.api = tweepy.API(auth)", "def __init__(self):\n self.application_id = None\n self.secret = None\n self.token = {}", "def __init__(self, API, playlist_uri):\n\n self.API = API\n self.playlist_uri = playlist_uri\n self.metadata = None", "def __init__(self, email, private_key, refresh_token=None,\n feed=None, client=None):\n self.adapters = {}\n self.email = email\n self.refresh_token = refresh_token\n self.private_key = private_key\n self.feed = feed\n self.client = client", "def __init__(self, client_id: str, client_secret: str,\n x_api_key: str, version: str):\n super().__init__(client_id, client_secret, x_api_key, version)", "def __init__(self, bot):\n self.bot = bot", "def __init__(self, bot):\n self.bot = bot", "def __init__(self, access_token=None):\r\n self.access_token = access_token\r\n\r\n self.add_filter(self.add_auth)", "def __init__(self, access_token, base_url=SHEERID_ENDPOINT_SANDBOX,\n target_version=\"0.5\", verbose=False, insecure=False):\n self.access_token = access_token\n self.base_url = base_url\n self.verbose = verbose\n self.target_version = target_version\n self.insecure = insecure", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, url=\"https://conveyor.hive.blog\",\n hive_instance=None):\n\n self.url = url\n self.hive = hive_instance or shared_hive_instance()\n self.id = 0\n self.ENCODING = 'utf-8'\n self.TIMEFORMAT = '%Y-%m-%dT%H:%M:%S.%f'\n self.K = hashlib.sha256(py23_bytes('steem_jsonrpc_auth',\n self.ENCODING)).digest()", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, who: str, when: date, what: str) -> None:\n\n # YOUR CODE HERE\n self.content = what\n self.userid = who\n self.created_at = when\n self.likes = 0", "def __init__(self, api_key=None, platforms=[]):\n super(GiantBombFeed, self).__init__()\n self.base_url = \"http://www.giantbomb.com/api/games\"\n self.default_params = {\n \"api_key\": api_key,\n \"format\": \"json\"\n }\n self.results_per_page = 100\n self.platforms = platforms", "def __init__ (self):\n pass", "def __init__(self, api_key, client_id=None, client_secret=None):\n self.api = API(api_key)\n self._manifest = Manifest(self.api)\n self.oauth = OAuth(client_id, client_secret)", "def __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)", "def __init__(self, api_key, client=Fetcher(FANART_URL)):\n self.api_key = api_key\n self.client = client", "def 
__init__(self, url, username, password, **kwargs):\n self.url = url\n self.username = username\n self.password = password\n self.context = kwargs", "def __init__(self, media, username, password):\n self.media = media\n self.username = username\n self.password = password", "def __init__(self, url, params=None):\n super(LivestreamVideo, self).__init__(url, params)\n self.video_id = self.get_video_id()\n self.livestream_user = self.get_username()", "def __init__(self, bot=BNBot):\n self.bot = bot", "def __init__(self, api_key, app_id):\n api.APIRequest.__init__(self, api_key)\n self._key = 'app_users'\n self._app_id = app_id", "def __init__(self, messenger: CanMessenger) -> None:\n self._messenger = messenger", "def __init__(self, access_token=None):\r\n self.access_token = access_token\r\n\r\n self.add_filter(self.add_auth)\r\n self.add_filter(self.use_json)", "def __init__(self, access_token=None):\r\n self.access_token = access_token\r\n\r\n self.add_filter(self.add_auth)\r\n self.add_filter(self.use_json)", "def __init__(self, access_token: str):\n self._access_token = access_token\n self._history = collections.deque(maxlen=20)\n self._schema: graphql.GraphQLSchema = MISSING\n self._localstorage = None\n self._contextstorage = None\n self._transport = None\n self._query_type = \"query\"\n self._required_query_output: Optional[str] = None\n self._service_endpoint: Optional[str] = None\n self._caller: Optional[str] = None", "def __init__(self, apikey, secret):\n self.apikey = apikey\n self.secret = secret", "def __init__(self, bot: BunkBot, channels: ChannelService):\r\n self.bot: BunkBot = bot\r\n self.message: Message = None\r\n self.channels: ChannelService = channels\r\n self.yt_result: YoutubeResult = YoutubeResult()\r\n self.yt_link: str = \"\"", "def __init__(self, id_: str, bio: str) -> None:\n\n # YOUR CODE HERE\n self.userid = id_\n self.bio = bio\n self.tweets = []", "def __init__(self, public_key, private_key, token, token_secret, base_url='http://api.telldus.com'):\n self.public_key = public_key\n self.private_key = private_key\n self.token = token\n self.token_secret = token_secret\n\n self.base_url = base_url\n\n self.oauth = self.generate_temp_session()", "def __init__(self, username = None, password = None):\n self.username = config['AUTH']['USERNAME']\n self.password = config['AUTH']['PASSWORD']\n self.login = config['URL']['LOGIN']\n self.nav_url = config['URL']['NAV']\n self.tag_url = config['URL']['TAGS']\n self.direct_url = config['URL']['DM']\n self.driver = webdriver.Chrome(config['ENVIRONMENT']['CHROMEDRIVER'])\n self.stay_logged = False\n self.api = InstagramAPI(self.username, self.password)", "def __init__(self, access_token=None):\r\n self.access_token = access_token\r\n\r\n self.add_filter(self.add_auth)\r\n self.add_filter(self.set_format)", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def __init__(self, *args, **kwargs):\r\n self.jsonp_callback = kwargs.pop('jsonp_callback', None)\r\n super(JsonResponse, self).__init__(*args, **kwargs)", "def __init__(self, address=('', 50000), authkey=b'tradingbot'):\n _ClientBot.__init__(self, address=address, authkey=authkey)", "def __init__(self):\n # Get a 
weboob instance\n self.weboob = Weboob()\n self.backend = None", "def __init__(self, api_client=None): # noqa: E501,D401,D403\n super().__init__(api_client)", "def __init__(self, *args, **kwargs):\n super(self.__class__, self).__init__(*args, **kwargs)", "def initialize(self, **kwargs):", "def __init__(self, url, auth_token, xapi_version=\"1.0.3\"):\n self.url = url\n self.auth_token = auth_token\n self.xapi_version = xapi_version" ]
[ "0.70955795", "0.70331687", "0.65805393", "0.6543465", "0.6526601", "0.64303684", "0.63264066", "0.625907", "0.6256634", "0.61950535", "0.61830455", "0.618123", "0.61768824", "0.61615527", "0.6148158", "0.6142532", "0.6112654", "0.6108066", "0.6090049", "0.6088559", "0.6086943", "0.6074998", "0.60672325", "0.605394", "0.6045962", "0.6040036", "0.60149527", "0.5993534", "0.59856105", "0.59599227", "0.59501463", "0.59495455", "0.5912712", "0.5907437", "0.59059656", "0.5894901", "0.5880138", "0.587572", "0.5867523", "0.5865871", "0.5854897", "0.5854897", "0.5853742", "0.58289313", "0.58239096", "0.58239096", "0.5808401", "0.579908", "0.57942367", "0.57665926", "0.57665926", "0.57665926", "0.5764218", "0.5758987", "0.57385826", "0.57364357", "0.5722739", "0.5719485", "0.5719485", "0.5709229", "0.5705617", "0.57050306", "0.57050306", "0.57050306", "0.57050306", "0.570057", "0.5698216", "0.5698216", "0.5698216", "0.5694722", "0.56903785", "0.5688886", "0.5674782", "0.5669014", "0.56684124", "0.5667425", "0.56655794", "0.5661962", "0.56596553", "0.5654917", "0.5654659", "0.56546295", "0.56546295", "0.56544846", "0.56534", "0.5651796", "0.56489694", "0.5637894", "0.56267536", "0.5623547", "0.5623199", "0.5623199", "0.5623199", "0.5611183", "0.56029916", "0.55935854", "0.5590362", "0.5588238", "0.55843276", "0.55816364" ]
0.760726
0
Save event to database
def save_event(self, data): rdb.table(self.rdb_table).insert(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, event):\n self.saved_events.append(event)", "def insert_event_to_db(self):\n try:\n events_coll.insert_one(self.event_info_to_dic())\n except Exception as e:\n print(e)", "def save(self, db):\n pass", "def save_event(id):\n event = Event.query.get_or_404(id)\n if not current_user.has_saved(event):\n current_user.save(event)\n db.session.commit()\n return jsonify({\"message\": \"Event added to your saved events list.\"})\n else:\n return jsonify({\"message\": \"You have already saved this event.\"})", "def save(self):\n self.db.commit()", "def save_db(self) -> None:", "def save(self):\n self.__db.commit()", "def save(self):\n db.session.commit()", "def save(self):\n self.session.commit()", "def save(self):\n logging.debug(\"sychronizing db\")\n self._db.sync()", "def writeEvent(self):\n\t\ttry:\n\t\t\tif self.dataFileHnd:\n\t\t\t\tself.dataFileHnd.writeRecord( (self.mdList())+[self.eventData] )\n\t\texcept sqlite3.OperationalError, err:\n\t\t\t# If the db is locked, wait 1 s and try again.\n\t\t\tprint err\n\t\t\ttime.sleep(1)\n\t\t\tself.writeEvent()\n\t\t# else:\n\t\t# \traise MissingMDIOError(\"Meta-data I/O object not initialized.\")", "def save_data(self):\n db.session.add(self)\n db.session.commit( )", "def save(self):\n # send data to be saved by another job\n save_callevent.delay(self.data)", "def save():", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n file = Path(\"config/event_{0}.json\".format(self.name))\n try:\n file.write_text(self.toJSON())\n except Exception as err:\n raise(err)", "def save(self)->None:\n database.cursor.execute(\"INSERT INTO meetups(topic,happening_date,tags,location,images,body) VALUES(%s,%s,%s,%s,%s,%s) RETURNING id\", (\n self.topic,\n self.happening_on,\n self.tags,\n self.location,\n self.images,\n self.body\n ))\n super().save()", "def save(self, *args, **kwargs):\n pass", "def writeToDB(self, eventDateTime, eventFileName, eventType, eventPath):\n conn = self.createConnection()\n c = conn.cursor()\n\n c.execute(\"INSERT INTO RansomedFiles (TIME, EventFileName, EventType, EventPath) VALUES (?,?,?,?)\", (eventDateTime, eventFileName, eventType, eventPath))\n conn.commit()\n conn.close()\n\n # print(\"[+]Wrote to the database successfully!\")", "def create_event(data):\n event = EventModel(**data)\n db.session.add(event)\n db.session.commit()\n return event", "def save(self):\n\n self.__session.commit()", "def save(self):\n\n self.__session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def write_to_db( self, *args ):\n try:\n toSave = [ a for a in args ]\n # save them\n self.session.add_all( toSave )\n self.session.commit()\n self._fire_save_notification()\n return True\n except Exception as e:\n print( \"Error : %s\" % e )\n self._fire_error_saving_notification( e )\n return False", "def save(self, *args, **kwargs) -> None:\n pass", "def save(self, *args, **kwargs) -> None:\n pass", "def save(self, *args, **kwargs) -> None:\n pass", "def saveDatabase():\r\n 
debug.write(\"saveDatabase processing\", 1)\r\n \"\"\" Only process if turbo mode is off \"\"\"\r\n if not currentTurboMode:\r\n debug.write(\"turbo mode off, process the save\", 1)\r\n \"\"\" Update all the player's stats gained and commit the database\"\"\"\r\n for player in players:\r\n debug.write(\"Commiting indivudal players to the virtual database: %s\" % player.name, 2)\r\n player.commit()\r\n debug.write(\"Attempting to save the database itself\", 1)\r\n database.save()\r\n debug.write(\"SQLite database saved\", 1)\r\n debug.write(\"Creating the event\", 1)\r\n \"\"\" Create and fire the event \"\"\"\r\n values = {\"type\":(\"setstring\", str(saveType))}\r\n gamethread.delayed(0, fireEvent, (\"sourcerpg_databasesaved\", values))\r\n debug.write(\"Event fired\", 1)\r\n \r\n \"\"\" Create a loop if we need to \"\"\"\r\n if str( saveType ) == \"intervals\":\r\n gamethread.delayedname(float(saveLength), 'sourcerpg_databasesave', saveDatabase)\r\n debug.write(\"saveDatabase processed\", 1)", "def test_put_event_on_calendar_db(self):\n camp_group = CampGroup('falcons', 'yellow')\n\n start = datetime.now()\n end = datetime.now()\n camp_event = CampEvent(\"basketball\", start, end)\n camp_group.events.append(camp_event)\n db.session.add(camp_group)\n db.session.add(camp_event)\n db.session.commit()\n\n new_title = 'soccer'\n json_data = {\n 'id': CampEvent.query.filter_by(title=\"basketball\").first().id,\n 'title': new_title,\n 'start': CampEvent.convert_py_datetime_to_iso_datetime(start),\n 'end': CampEvent.convert_py_datetime_to_iso_datetime(end),\n 'group_id': CampEvent.query.filter_by(title=\"basketball\").first().group_id\n }\n\n self.app.put(\"/saveEvent\", data=json.dumps(json_data), content_type='application/json')\n event = CampEvent.query.first()\n self.assertEqual(event.title, new_title)", "def save():\n pass", "def send(self, event):\r\n try:\r\n self.collection.insert(event, manipulate=False)\r\n except PyMongoError:\r\n # The event will be lost in case of a connection error.\r\n # pymongo will re-connect/re-authenticate automatically\r\n # during the next event.\r\n msg = 'Error inserting to MongoDB event tracker backend'\r\n log.exception(msg)", "def store_event(self, event: EventLogEntry) -> None:\n check.inst_param(event, \"event\", EventLogEntry)\n insert_event_statement = self.prepare_insert_event(event)\n run_id = event.run_id\n\n with self.run_connection(run_id) as conn:\n conn.execute(insert_event_statement)\n\n if event.is_dagster_event and event.dagster_event.asset_key: # type: ignore\n check.invariant(\n event.dagster_event_type in ASSET_EVENTS,\n \"Can only store asset materializations, materialization_planned, and\"\n \" observations in index database\",\n )\n\n event_id = None\n\n # mirror the event in the cross-run index database\n with self.index_connection() as conn:\n result = conn.execute(insert_event_statement)\n event_id = result.inserted_primary_key[0]\n\n self.store_asset_event(event, event_id)\n\n if event_id is None:\n raise DagsterInvariantViolationError(\n \"Cannot store asset event tags for null event id.\"\n )\n\n self.store_asset_event_tags(event, event_id)\n\n if event.is_dagster_event and event.dagster_event_type in ASSET_CHECK_EVENTS:\n self.store_asset_check_event(event, None)", "def save(self):\n\t\tdb.session.add(self)\n\t\tdb.session.commit()", "def save(self):\n if self.document.id:\n self.db.insert(self.document)\n else:\n self.db.update(self.document.id,self.document)", "def event_process():\n title = request.args.get(\"title\")\n 
description = request.args.get(\"des\")\n location = request.args.get(\"location\")\n start_date_time = request.args.get(\"start_date_time\")\n end_date_time = request.args.get(\"end_date_time\")\n user_id = session['user']\n sport_id = request.args.get(\"sport\")\n\n event = Event(title = title, description = description,\n location = location,date = start_date_time, time = end_date_time,\n user_id=user_id, sport_id=sport_id)\n\n db.session.add(event)\n db.session.commit()\n return redirect('/')", "def save_alert(self, alert):\n self.database_list.append(alert) # fake database for demo", "def Save(self) -> None:\n self.__conn.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()\n # try:\n # db.session.add(self)\n # db.session.commit()\n # except exc.IntegrityError:\n # db.session.rollback()", "def save_now(self):\r\n self.save()", "def save_now(self):\r\n self.save()", "def save(self):\r\n db.session.add(self)\r\n db.session.commit()", "def save (self):\n pass", "def save_changes(data):\n db.session.add(data)\n db.session.commit()", "def save(self):\n \n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self, db):\n db.query(\n \"INSERT INTO rooms (name, type) VALUES(:name, :type)\",\n name=self.name, type='O'\n )", "def save(self):\n\n pass", "def save_db(self) -> None:\n self.connection.commit()", "def save(self, *args, **kwargs) -> Any:\n pass", "def saveAll(self, event=None):\n self.s.end()", "def save(self, handler, name):", "def save(self) -> None:\n pass", "def save(self) -> None:\n pass", "def save(self) -> None:\n pass", "def save(self):\n data = self.serialize()\n\n self.validate(data)\n\n saved_data = DATABASE_CONNECTION.insert(self.__class__.__name__, data)\n\n self.__dict__.update(saved_data)", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def store_event(self, violations):\n current_time = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")\n insert_query = \"\"\"INSERT INTO social_distancing (Location, Local_Time, Violations) VALUES ('{}', '{}', {})\"\"\".format(self.location, current_time, violations)\n self.off_chain.insert(insert_query)\n\n event_id = self.off_chain.select(\"\"\"SELECT LAST_INSERT_ID() FROM social_distancing\"\"\")[0][0]\n self.on_chain.store_hash(event_id, self.location, current_time, violations)", "def save(self, *args, **kwargs):\n return", "def create_event(conn, event):\n sql = ''' INSERT INTO events(ISO_Week,Event_ISO_Date,Unix_Time,Node,Event_Type,Duration)\n VALUES(?,?,?,?,?,?) 
'''\n c = conn.cursor()\n c.execute(sql, event)\n conn.commit()\n return c.lastrowid", "def save(self):\n # TODO (Pierre): code", "def __write_event(self, handle, nbr):\n try:\n event = self.database.get_event_from_handle(handle)\n self.__write_row(nbr, handle, event)\n except:\n event = \"NOT FOUND\"\n self.__write_row(nbr, handle, event)", "def save(self, *args):\n # need to do!!\n pass", "def save(self, data):\n data['id'] = self.id\n\n self.db.append(data)", "def _store_event(self, event):\n symbol = event.symbol\n self.symbol[symbol][\"bid\"] = event.bid\n self.symbol[symbol][\"ask\"] = event.ask\n self.symbol[symbol][\"timestamp\"] = event.time", "def save(self, *args, **kwargs):\n super(self.__class__, self).save(*args, **kwargs)", "def save(self, obj):", "def save(self):\n\n if self.object_id:\n # update event\n if not self._track_changes:\n return True # there's nothing to update\n url = self.build_url(\n self._endpoints.get('event').format(id=self.object_id))\n method = self.con.patch\n data = self.to_api_data(restrict_keys=self._track_changes)\n else:\n # new event\n if self.calendar_id:\n url = self.build_url(\n self._endpoints.get('event_calendar').format(\n id=self.calendar_id))\n else:\n url = self.build_url(self._endpoints.get('event_default'))\n method = self.con.post\n data = self.to_api_data()\n\n response = method(url, data=data)\n if not response:\n return False\n\n self._track_changes.clear() # clear the tracked changes\n\n if not self.object_id:\n # new event\n event = response.json()\n\n self.object_id = event.get(self._cc('id'), None)\n\n self.__created = event.get(self._cc('createdDateTime'), None)\n self.__modified = event.get(self._cc('lastModifiedDateTime'), None)\n\n self.__created = parse(self.__created).astimezone(\n self.protocol.timezone) if self.__created else None\n self.__modified = parse(self.__modified).astimezone(\n self.protocol.timezone) if self.__modified else None\n\n self.ical_uid = event.get(self._cc('iCalUId'), None)\n else:\n self.__modified = self.protocol.timezone.localize(dt.datetime.now())\n\n return True", "def insert_event(self, data):\n query = \"INSERT INTO events (Users_idUsers, Title, Content, DateEvent, Fundraiser, FundraiseAmount)\" \\\n \" VALUES('{}', '{}', '{}', '{}', {},\" \\\n \" {})\".format(data[\"Users_idUsers\"], data[\"Title\"], data[\"Content\"], data[\"DateEvent\"], data[\"Fundraiser\"], data[\"FundraiseAmount\"])\n\n cursor = DB.instance.connection.cursor()\n cursor.execute(query)\n DB.instance.connection.commit()\n return cursor.lastrowid", "def save_to_db(self):\n result = self.db.newsdb.insert_one({\"name\": self.name})\n self.id = str(result.inserted_id)", "def save(self, commit=True):\r\n event = super(RPEventCreateForm, self).save(commit)\r\n event.add_host(self.owner, main_host=True)\r\n hosts = self.cleaned_data.get(\"hosts\", [])\r\n for host in hosts:\r\n # prevent owner from being downgraded to normal host if they were added\r\n if host != self.owner:\r\n event.add_host(host)\r\n gms = self.cleaned_data.get(\"gms\", [])\r\n for gm in gms:\r\n event.add_gm(gm)\r\n for guest in self.cleaned_data.get(\"invites\", []):\r\n if guest in hosts or guest in gms or guest == self.owner:\r\n continue\r\n event.add_guest(guest)\r\n for org in self.cleaned_data.get(\"org_invites\", []):\r\n event.invite_org(org)\r\n plot = self.cleaned_data.get(\"plot\", None)\r\n if plot:\r\n # we create a blank PlotUpdate so that this is tagged to the Plot, but nothing has happened yet\r\n event.beat = plot.updates.create()\r\n 
event.save()\r\n self.pay_costs()\r\n self.post_event(event)\r\n return event", "def save(self):\n try:\n db.session.add(self)\n db.session.flush()\n except Exception:\n db.session.rollback()\n raise Exception", "def save_article(title,image,description,content,pub_date,news_url,note,user):\n article = Article(\n title=title,\n image=image,\n description=description,\n content = content,\n pub_date=datetime.strptime(pub_date, \"%Y-%m-%dT%H:%M:%SZ\"),\n news_url=news_url\n )\n \n db.session.add(article)\n \n #testing\n print(article)\n \n # 2.0 add rating and notes during save event\n #creating the relationship between user and the saved article\n saved_article = Saved(\n user=user,\n article=article,\n notes=note\n )\n # notes=notes,\n # rating=rating)\n \n db.session.add(saved_article)\n db.session.commit()\n\n #testing\n print(saved_article)", "def test_post_event_on_calendar_db(self):\n camp_group = CampGroup('falcons', 'yellow')\n db.session.add(camp_group)\n db.session.commit()\n\n json_data = {\n 'title': 'Test Event',\n 'start': '2017-8-8T12:00:00',\n 'end': '2017-8-8T12:00:00',\n 'group_id': '1'\n }\n\n self.app.post(\"/saveEvent\", data=json.dumps(json_data), content_type='application/json')\n events = CampEvent.query.all()\n self.assertEqual(len(events), 1)", "def save_database(app):\n app.database().save()\n app.status.message('Finished saving..')", "def save(self, record):\n pass", "def save(self):\n store = datastore.DataStore()\n store.connect()\n store.setup()\n store.put(self.as_doc())" ]
[ "0.7487547", "0.7325725", "0.7049459", "0.6976891", "0.69406134", "0.6901806", "0.68859524", "0.6789551", "0.66951", "0.66863483", "0.66196334", "0.65969324", "0.65858656", "0.6583082", "0.6570057", "0.6570057", "0.6570057", "0.6570057", "0.6570057", "0.6570057", "0.6570057", "0.6570057", "0.6541968", "0.6539257", "0.65242314", "0.6522762", "0.651794", "0.65169066", "0.65169066", "0.6513589", "0.6513589", "0.6513589", "0.6513589", "0.6507921", "0.6482275", "0.6482275", "0.6482275", "0.6451125", "0.6427038", "0.6393112", "0.6382829", "0.636872", "0.6360689", "0.6327973", "0.63158274", "0.6304676", "0.6301712", "0.62829363", "0.6279994", "0.6279994", "0.62736803", "0.6272558", "0.6266276", "0.6262058", "0.62575966", "0.62575966", "0.62575966", "0.62575966", "0.62575966", "0.62575966", "0.62575966", "0.62575966", "0.62575966", "0.62575966", "0.62575966", "0.62394124", "0.62295055", "0.6229459", "0.62279755", "0.62210166", "0.6220982", "0.6212234", "0.6212234", "0.6212234", "0.62052035", "0.6201193", "0.6201193", "0.6201193", "0.6201193", "0.6201193", "0.6200218", "0.6200019", "0.61928797", "0.6180285", "0.61682576", "0.6163157", "0.6156192", "0.6143384", "0.61411875", "0.6133863", "0.61326617", "0.6119215", "0.61138654", "0.6106274", "0.6092648", "0.60674214", "0.60649467", "0.60537696", "0.6046048", "0.604026" ]
0.8077102
0
Iterate through all event pages
def get_events(self): url = '/v2.4/'+self.page_id+'/events' data = self.graph.request(url) while 'next' in data['paging'].keys(): print data['paging']['next'] data = self.graph.request(url, args={ 'limit' : 100, 'after' : data['paging']['cursors']['after'] }) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_event_list(self, response):\n for event in response.css(\".view-content .article-title a::attr(href)\"):\n event_url = event.extract()\n yield scrapy.Request(\n response.urljoin(event_url),\n callback=self.parse_event_page,\n dont_filter=True,\n )\n next_url = self._response_next_url(response)\n if next_url:\n yield scrapy.Request(\n response.urljoin(next_url),\n callback=self.parse_event_list,\n dont_filter=True,\n )", "def parse(self, response):\n for link in response.css(\".event-entry .event-title a::attr(href)\").extract():\n yield scrapy.Request(\n response.urljoin(link), callback=self.parse_event_page, dont_filter=True\n )", "def events(self) -> [redirect, HTMLBody]:\n\t\t# Get all events and split into 2 groups\n\t\teventsl, eventsr = prepare_events(get_events())\n\t\treturn render_template(\"events.jinja2\", eventsl=eventsl, eventsr=eventsr)", "def searchForEvents(self, search_args, onProgress):\n print('[EventFinder]: Search For Events called. Checking how many pages to crawl...')\n pages = self.get_total_pages_to_search(search_args)\n urls = [self.assembleRequest(search_args, p) for p in range(1, pages + 1)]\n\n print('[EventFinder]: Crawling %d pages from the eventful api...' % pages)\n start_ms = time_ms()\n\n for u in urls:\n response = requests.get(u)\n events = self.parse_events(response)\n onProgress(events)\n\n print('[EventFinder]: Crawling took ' + str(time_ms() - start_ms) + ' ms')", "def scrape_events(meta_url, collection):\r\n options = Options()\r\n options.add_argument('--headless')\r\n driver = webdriver.Firefox(options=options)\r\n driver.get(meta_url)\r\n soup = BeautifulSoup(driver.page_source, 'html.parser')\r\n meta_dropdown = soup.find('select', {'name': 'meta'}) # get drop down selector for meta\r\n selected_meta = meta_dropdown.find('option', selected=True) # get current meta\r\n \r\n def get_next(d, class_name):\r\n \"\"\"Check if the next button is still valid\"\"\"\r\n try:\r\n button = d.find_elements_by_class_name('Nav_PN')[-1]\r\n return button if button.text == 'Next' else False\r\n except Exception as e:\r\n return False\r\n \r\n page = 1\r\n while True:\r\n print(f'\\nScraping event page {page}...')\r\n next_btn = get_next(driver, 'Nav_PN')\r\n soup = BeautifulSoup(driver.page_source, 'html.parser') # make some soup\r\n \r\n for event in soup.find_all(class_='Stable')[2].find_all(class_='hover_tr'): # 10 events list table\r\n \"\"\"\r\n This loop iterates through event table rows, pulling out an ID number,\r\n the star rating and the date of the event\r\n \"\"\"\r\n link = event.a # associated hyperlink\r\n eid = re.search(r\"e=(\\d+)&\", link['href']).group(1) # unique id number\r\n stars = event.find(class_='O16').find_all('img') # star rating / level\r\n collection.insert_one({\r\n 'id': eid,\r\n 'name': link.text,\r\n 'date': event.find(class_='S10').text,\r\n 'level': 4 if 'bigstar' in stars[0]['src'] else len(stars),\r\n 'link': mtgtop8_url.format(link['href']),\r\n 'meta': selected_meta.text\r\n })\r\n \r\n if next_btn:\r\n next_btn.click()\r\n page += 1\r\n sleep(1)\r\n else:\r\n print('\\n\\n')\r\n driver.close()\r\n break", "def show_events_list():\r\n\tevents_list = Page.objects.filter(tags='events').order_by('-created')\r\n\treturn {'events_list': events_list}", "async def events(self) -> Iterable[Event]:", "def __show_all_events(self):\n for event in self.events_list:\n self.__print_events_info(event)\n print()", "def test_event_page(self):\n res = self.client.get('/events')\n data = res.data.decode('utf-8')\n assert 
res.status == '200 OK'\n assert 'Upcoming Events' in data", "def _iter_events(self) -> Generator:\n response = self.client.call()\n events: list = response.json()\n\n if not events:\n return []\n\n while True:\n yield events\n last = events.pop()\n self.client.set_next_run_filter(last['@timestamp'])\n response = self.client.call()\n events = response.json()\n try:\n events.pop(0)\n assert events\n except (IndexError, AssertionError):\n LOG('empty list, breaking')\n break", "def scrape_events(path, urls):\n seen_ids = set()\n result = []\n for url in urls:\n # Get all of the Network requests being sent out\n print(f'Processing {url}')\n driver.get(url)\n browser_log = driver.get_log('performance') \n events = [process_browser_log_entry(entry) for entry in browser_log]\n results = []\n # Find the Network request that sends a GET request to EventBrite API\n for event in events:\n if event['method'] == 'Network.responseReceived':\n # print(event)\n if 'event_ids' in event['params']['response']['url']:\n results.append(event)\n # Get the GET request URL\n get_url = \"\"\n # TODO: Sometimes returning 0 or more than 1... I'm not sure why :(\n if len(results) >= 1:\n get_url = results[0]['params']['response']['url']\n # Get the GET request response JSON\n json_response = get_request(get_url)\n event_list = json_response['events']\n # Find unique events in the response JSON \n unique_event_list = []\n for event in event_list:\n if event['id'] not in seen_ids:\n seen_ids.add(event['id'])\n unique_event_list.append(event)\n parsed_events = parse_event_page(unique_event_list)\n result.extend(parsed_events)\n else:\n print(results)\n print('yikes something went wrong')\n\n driver.close()\n return result\n # save_events(path, result)", "def get_events_helper(Event):\n try:\n limit = int(request.args.get('limit'))\n page = int(request.args.get('page'))\n except:\n limit = 10\n page = 1\n user_input = \"get_all\"\n check_input_dict = {\n \"get_all\": lambda: Event.get_all_pages(limit, page)\n }\n events_page_object = check_input_dict.get(user_input, \"Something went wrong!!\")()\n status_code = 200\n result = {\"Events\": print_events(pagination(events_page_object)[0]),\n \"Current page\": pagination(events_page_object)[1],\n \"All pages\": pagination(events_page_object)[2]}\n return result, status_code", "def slurp_events(self):\n while self.has_event():\n self.get_event()", "def events(self):\n if \"events\" in self._prop_dict:\n return EventsCollectionPage(self._prop_dict[\"events\"])\n else:\n return None", "def list_event(request):\n event_list = Event.objects.all()\n paginator = Paginator(event_list, 5)\n\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1\n\n # If page request (9999) is out of range, deliver last page of results.\n try:\n event_list = paginator.page(page)\n except (EmptyPage, InvalidPage):\n event_list = paginator.page(paginator.num_pages)\n\n context = {'event_list': event_list }\n return render_to_response('event_list.html',\n context,\n context_instance=RequestContext(request))", "def test_response_is_paginated(self):\r\n user = ViewAfishaTests.mentor\r\n EventFactory.create_batch(50, city=user.profile.city)\r\n client = self.return_authorized_user_client(user)\r\n\r\n response_data = client.get(path=EVENTS_URL).data\r\n\r\n self.assertTrue(\"next\" in response_data)\r\n self.assertTrue(\"previous\" in response_data)\r\n self.assertTrue(\"results\" in response_data)", "def parse_competition(self, response):\n # gather events from the competition 
page\n event_urls = response.css('div.navilevel1 p a::attr(href)').getall()\n event_titles = response.css('div.navilevel1 p a::text').getall()\n\n for event_url, event_title in zip(event_urls, event_titles):\n # assemble direct URL for this event\n full_event_url = response.urljoin(event_url)\n\n # pass along metadata for use in next steps\n event_details = parse_qs(urlsplit(full_event_url).query)\n event_title = regex_replace(event_title)\n\n if treatable_event(event_title):\n response.meta.update(dict(instance_of_event_in_competition=detect_event_multiple(event_title),\n event_title=clean_event_title(event_title),\n event_gender=event_details.get(\"gen\", [np.nan])[0]))\n\n # scrape the event page\n yield scrapy.Request(url=full_event_url,\n callback=self.parse_event,\n meta=response.meta)", "def events(self) -> Generator[dict, None, None]:\n\n for audit_file, audit_type in self.identified_files.items():\n temp_file_path = f\"{self.tempdir.name}/{audit_file}\"\n\n if audit_type == \"stateagentinspector\":\n yield from self.parse_agent_events(temp_file_path)\n\n # If we have atleast the hits.json file, we can make alert nodes\n if self.alert_files[\"hits.json\"]:\n yield from self.parse_alert_files(self.tempdir.name)\n\n self.tempdir.cleanup()", "def get_events(self):\n\n print \"\\ngetting new Events\"\n path = os.path.join(self.path, 'no_consent')\n for d_cnt, date in sorted(enumerate(os.listdir(path))):\n\n if os.path.isdir(os.path.join(self.events_path, date)):\n print \"%s already processed\" % date\n continue\n\n directory = os.path.join(path, date)\n for recording in os.listdir(directory):\n if os.path.isdir(os.path.join(directory, recording)):\n\n # Can we reduce this list of objects using ROI information?\n try:\n use_objects = {}\n for region, objects in self.soma_objects.items():\n for ob, position in objects.items():\n use_objects[ob] = position\n\n ce.get_event(recording, directory, use_objects, self.config['events'])\n except:\n print \"recording: %s in: %s is broken.\" %(recording, directory)\n else:\n print \"already processed: %s\" % recording\n print \"done.\"", "def scrape_event(self, body):\n\n content = body.find('div', {'id': 'main-content'})\n\n title = self.scrape_title(body)\n description = self.scrape_description(content)\n location = self.scrape_location(content)\n location_details = self.scrape_location_details(content)\n admission = self.scrape_admission(content)\n admission_details = self.scrape_admission_details(content)\n # sponsor = self.scrape_sponsor(content)\n related_url = self.scrape_related_url(content)\n invited_audience = self.scrape_invited_audience(content)\n categories = self.scrape_categories(content)\n image = self.scrape_image(content)\n date_times = self.scrape_dates(content)\n\n cost = admission_details\n\n if admission_details == '\"\"':\n cost = admission\n\n event_list = []\n\n for date_time in date_times:\n date, start_time = self.date_time_to_tuple(date_time[0])\n end_time = ''\n\n # If the date_time tuple shows that it is an all day event\n if date_time[1]:\n start_time = '8:00'\n end_time = '20:00'\n event_dict = {\n 'Title': title,\n \"Description\": description,\n 'Date From': date,\n 'Start Time': start_time,\n 'End Time': end_time,\n 'Location': location,\n 'Cost': cost,\n 'Event Website': related_url,\n 'Photo URL': image,\n \"Invited Audience\": invited_audience,\n \"Event Types\": categories,\n \"Location Details\": location_details\n }\n event_list.append(event_dict)\n return event_list", "def get(self):\r\n #\"SELECT * FROM 
DBEvent\"\r\n self.insertContent(\"<hr>&nbsp;&nbsp;&nbsp;Грядущие события:<br>\")\r\n event = self.event #db.GqlQuery(self.query) \r\n eventlist=''\r\n #self.checkSession(self.request.headers.get('Cookie'), False)\r\n found_events = False\r\n \r\n ec = DBEventCat()\r\n cats = ec.get_categories()\r\n \r\n for this_event in event:\r\n try:\r\n if not found_events: found_events = True\r\n if self.Session['access'] >= this_event.access or int(self.Session['userid']) == int(this_event.userid) or this_event.access <= 0:\r\n eventlist += '<a href=\"/event/'+str(this_event.key().id())+'\">'+this_event.name.encode(\"utf8\")+'</a>'\r\n users = db.GqlQuery(\"SELECT * FROM DBEventAnketa WHERE eventid = :eventid\",\r\n eventid = this_event.key().id())\r\n if self.Session['access'] >= 8 or int(self.Session['userid']) == int(this_event.userid): \r\n eventlist += '&nbsp;[ <i><a href=\"/event/info/' + str(this_event.key().id()) + '\">Участников зарегистрировано: ' + str(users.count()) + '</i></a> ]<br>'\r\n elif self.Session['access'] >= this_event.access:\r\n eventlist += '&nbsp;[ <i>Участников зарегистрировано: ' + str(users.count()) + '</i> ]<br>'\r\n except: continue\r\n if found_events:\r\n self.insertTemplate('tpl_event_add.html', { 'eventlist': eventlist, 'cats' : cats })\r\n else:\r\n self.insertContent(\"&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Пока мероприятий не запланировано!\")\r\n self.insertContent(\"<hr>&nbsp;&nbsp;&nbsp;Недавно прошедшие события:<br>\")\r\n \r\n eventlist = ''\r\n events = db.GqlQuery(\"SELECT * FROM DBEvent where date<:today order by date desc limit 10\", today = db.datetime.date.today())\r\n for this_event in events:\r\n if self.Session['access'] >= this_event.access or int(self.Session['userid']) == int(this_event.userid):\r\n eventlist += '<a href=\"/event/'+str(this_event.key().id())+'\">'+this_event.name.encode(\"utf8\")+'</a>'\r\n users = db.GqlQuery(\"SELECT * FROM DBEventAnketa WHERE eventid = :eventid\",\r\n eventid = this_event.key().id())\r\n if self.Session['access'] >= 8 or int(self.Session['userid']) == int(this_event.userid): \r\n eventlist += '&nbsp;[ <i><a href=\"/event/info/' + str(this_event.key().id()) + '\">Участников зарегестрировано: ' + str(users.count()) + '</i></a> ]<br>'\r\n elif self.Session['access'] >= this_event.access:\r\n eventlist += '&nbsp;[ <i>Участников зарегистрировано: ' + str(users.count()) + '</i> ]<br>'\r\n self.insertTemplate('tpl_event_add.html', { 'eventlist': eventlist })\r\n\r\n \r\n #self.drawPage()\r", "def test_events(self):\n\n resp = self.client.get('/events?page=1&user_categories=113%2C105%2C104 ')\n self.assertTrue('next_events_url' in resp.context)\n self.assertTrue('previous_events_url' in resp.context)\n self.assertTrue('events_list' in resp.context)\n self.assertTrue('previous' in resp.context)\n self.assertTrue('next' in resp.context)\n self.assertEqual(resp.status_code, 200)", "def _parse_events(self, html):\n print \"Parse events\"\n data = []\n soup = BeautifulSoup(html, \"html.parser\")\n events = soup.find_all(\"div\", {\"class\": \"program clearfix\"})\n \"\"\" Site's html is broken. 
We have to handle descriptions \n with a hack.\n \"\"\"\n descriptions = soup.find_all(\"div\", {\"class\": \"programpostingress\"})\n for index, event in enumerate(events):\n link_tag = event.find(\"a\")\n if link_tag:\n link = link_tag[\"href\"]\n else:\n link = None\n dates = self._parse_date(self._parse_text(event.find(\"span\", {\"class\": \"programpostdato\"})))\n row = {\n \"title\": self._parse_text(event.find(\"span\", {\"class\": \"programposttittel\"})),\n \"date_start\": dates[\"start\"],\n \"date_end\": dates[\"end\"],\n \"description\": self._parse_text(descriptions[index]),\n \"link\": link,\n \"country\": \"Norge\"\n }\n data.append(row)\n print \"Found %s events\" % len(data)\n return data", "def events(bot, event, *args):\n yield from _printEventList(bot, event)", "def events():\n # Compare cache against a new GET request\n temp_cache = EVENTS_CACHED\n # events_new = get_calendar_events_today(CALENDAR_URL)\n events_new = get_calendar_events_limit(CALENDAR_URL, sort=False)\n\n # If not change is detected, tell the browser to keep it's current content.\n if temp_cache is None or compare_events(temp_cache, events_new):\n return \"false\"\n\n # Else, render the partial events template to return to the client.\n return render_template('events_sorted.html', events=sort_events_days(events_new))", "def iter_events(self, name):\n for event in self._get_events(name):\n yield event", "def listings(request, category1, category2, category3, page = 1):\n \n # Creating URL for request\n base_url = \"https://www.eventbriteapi.com/v3/events/search/\"\n token_component = \"token=BKKRDKVUVRC5WG4HAVLT\" #I had this token in my mail link\n category_component = \"categories=\" + category1 + ',' + category2 + ',' + category3\n page_component = \"page=\" + str(page)\n url_without_page = base_url + \"?\" + token_component + \"&\" + category_component\n url_complete = url_without_page + \"&\" + page_component\n \n # GET events from Eventbrite\n f = urllib2.urlopen(url_complete) \n json_string = f.read() \n parsed_json = json.loads(json_string) \n\n # Parse through JSON\n events = parsed_json['events']\n eventsList = []\n \n for i in events:\n eventsList.append(event_container())\n \n # Parse further through JSON\n eventsList[-1].name = i['name']['text']\n eventsList[-1].id = i['id']\n eventsList[-1].url = i['url']\n try:\n eventsList[-1].description = i['description']['text']\n except:\n eventsList[-1].description = \"No description available\"\n eventsList[-1].resource_uri = i['resource_uri']\n \n \n listings_url_base = '/topthree/listings/'+ category1 + '/' + category2 + '/' + category3 + '/'\n \n # Pagination\n \n \"\"\"\n Performing manual pagination instead of Django pagination \n because GET request for events pulls in paginated data already\n \"\"\"\n \n next_page = int(page) + 1\n next_page_url = listings_url_base + str(next_page) \n \n if int(page)>1:\n prev_page = int(page) - 1\n prev_page_url = listings_url_base + str(prev_page) \n\n else:\n prev_page = 0\n prev_page_url = \"#\"\n \n \n # Sending values to template\n \n template = loader.get_template('listings.html')\n\n context = RequestContext(request, {\n 'eventsList': eventsList,\n 'prev_page_url':prev_page_url,\n 'next_page_url':next_page_url,\n 'prev_page':prev_page,\n 'page':page,\n 'category1':category1,\n 'category2':category2,\n 'category3':category3,\n })\n \n return HttpResponse(template.render(context))", "def process_events(self, events):\n for game_event in events:\n if game_event:\n game_event = 
self._send_event(game_event)\n if game_event:\n yield game_event", "def logevents(self, events, request = None):\n for event in events:\n self.logevent(event, request)", "def parse(self, response):\n for nav_link in response.css(\".col-sm-7 a.btn\"):\n if \"?bd=\" in nav_link.attrib[\"href\"]:\n yield response.follow(\n nav_link.attrib[\"href\"], callback=self._parse_events_page\n )\n\n yield from self._parse_events_page(response)", "def get_events(self, cursor: Optional[str] = None) -> Iterator[dict]:\n args = {\"cursor\": cursor}\n\n yield from self._get_logbook_page(self.base_path, args)", "def event_processor(self, _events):\r\n\r\n event_list = []\r\n #iter a list of event names\r\n for event_name in _events:\r\n counter = {}\r\n try:\r\n #filter events in Events table by event name\r\n events_queryset = Event.objects.filter(event__name=event_name)\r\n counter['event'] = event_name\r\n counter['count'] = 0\r\n\r\n #django querysets is better to count with native counter instead of len()\r\n if events_queryset.count() > 0:\r\n\r\n #iter the objects instances and increase the counter\r\n for query in events_queryset:\r\n counter['count'] = counter['count'] + query.counter\r\n\r\n \r\n event_list.append([counter])\r\n\r\n except Exception as EventFilterException:\r\n print(EventFilterException)\r\n \r\n return event_list", "def events(self):\n self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))\n self.filename = 'events'", "def events(self):\n self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))\n self.filename = 'events'", "def page_through(app_id, app_secret):\n has_next_page = True\n in_date_range = True\n \n #we only want to keep the articles that were returned from the NYtimes api, so this creates a list of target urls\n with open('output/article_search.json') as f:\n nyt_dat = json.load(f)\n nyt_urls = []\n for i in nyt_dat:\n nyt_urls.append(core_url(i['web_url']))\n\n items = get_page(app_id, app_secret)\n process_items(items, nyt_urls)\n\n while has_next_page & in_date_range:\n if 'paging' not in items.keys():\n has_next_page=False\n\n if items['data'][0]['created_time'][0:7]=='2016-10':\n in_date_range = False\n\n items = json.loads(request_until_succeed(items['paging']['next']))\n process_items(items, nyt_urls)", "def parse(self, response):\n events = response.css(\n \"div.column.scroll-item.is-one-third-tablet.is-full-mobile\"\n )\n for event in events:\n if \"Friday Nights\" in event.css(\"span.card-title.h4 span::text\").get():\n datetimes = event.css(\"div.card-text.card-subhead span::text\").get()\n start_datetime, end_datetime = self.format_datetimes(datetimes)\n if start_datetime >= datetime.now() and start_datetime < datetime.now() + timedelta(\n weeks=4\n ):\n # the link is relative\n event_link = event.css(\"div.card-image a\").attrib[\"href\"]\n full_url = self.domain + event_link\n yield SplashRequest(\n url=full_url,\n callback=self.parse_event,\n method=\"GET\",\n endpoint=\"execute\",\n args={\"wait\": 15.0, \"lua_source\": self.lua_script},\n cb_kwargs={\n \"start_datetime\": start_datetime,\n \"end_datetime\": end_datetime,\n },\n )", "def main():\n credentials = get_credentials()\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n max = 7\n events = getEvents(credentials, now, max)\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, 
event['summary'])\n #addEvent(credentials)", "def test_get_events(self):\n\n request_params = {\n \"token\": EVENTBRITE_API_KEY,\n \"location.latitude\": \"37.4192008972\",\n \"location.longitude\": \"-122.057403564\",\n \"location.within\": \"20mi\",\n \"sort_by\": \"date\"\n }\n url_encoded_request_params = _update_urlencode_request_params(\"103,109\", 1, request_params)\n events_list, page_count = _get_events(url_encoded_request_params)\n self.assertTrue(type(events_list) is list)\n self.assertTrue(type(page_count) is int)", "def parse(self, response):\n if \"Calendar-and-Events\" in response.url:\n return self.parse_event_list(response)\n elif \"/events/\" in response.url:\n return self.parse_event_page(response)\n else:\n return self.parse_documents_page(response)", "def index():\n # return render_template('index.html', events=get_calendar_events_today(CALENDAR_URL))\n return render_template('index.html', events=get_calendar_events_limit(CALENDAR_URL), events_sorted=True)", "def homepage(request):\n template_var = base_template_vals(request)\n template_var[\"events\"] = Event.objects.filter(is_approved=True\n ).order_by(\"-created\")\n \n return render_to_response(\"event/event_homepage.html\", template_var,\n context_instance=RequestContext(request))", "def _all_pages(self, page_function, **kwargs) -> Iterator[Iterable]:\n\n next_token = None\n is_truncated = True\n while is_truncated:\n page = page_function(token=next_token, **kwargs)\n next_token = page.next_token\n is_truncated = page.is_truncated and next_token is not None\n for task in page.page_data:\n yield task", "def wiki_page_events(self) -> bool:\n return pulumi.get(self, \"wiki_page_events\")", "def test_get_events(self):\n events = gracedb.events()\n for event in events:\n self.assertTrue('graceid' in event)\n break", "def get_event_list(self):\n pass", "def get_events():\n url = app.config['EVENTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_events(response.json())\n raise RuntimeError('Error in retrieving events.')", "def __iter__(self):\n return _iterEvents(self._eventHandlers)", "def get_pages(epObject, fileDict):\r\n homePage = DOMAIN + epObject.ViewLink\r\n soup = make_soup(homePage)\r\n fileDict['pageUrls'].append(homePage)\r\n fileDict['pageFileNames'].append('index.html')\r\n fileDict['pageIds'].append(str(epObject.ObjectId))\r\n for a in soup.find_all('a', {'href': 'javascript://'}):\r\n if a['onclick'].find('GotoPage') > 0:\r\n pageId = get_page_id(str(a['onclick']), str(epObject.ObjectId))\r\n if pageId not in fileDict['pageIds']:\r\n address = homePage + \"&pageId={0}\".format(pageId)\r\n fileName = a.string.replace(' ', '').lower() + \".html\"\r\n fileDict['pageUrls'].append(address)\r\n fileDict['pageFileNames'].append(fileName)\r\n fileDict['pageIds'].append(pageId)\r\n return fileDict", "def events(self):\n return iter(self._collection.event_list())", "def parse_events(response):\n\n if not request_was_successful(response):\n print('WARNING: Unsuccessful HTTP response from eventful')\n return []\n\n json = response.json()\n if json.get('events') is None:\n print(\"ERROR: No eventful results on page\")\n return []\n\n # parse the events into a list of Event objects\n # print(json)\n events = []\n events.extend(map(Event, json['events']['event']))\n return events", "def _printEventList(bot, event):\n conv_event = bot.memory.get_by_path(['_event', event.conv_id])\n html = []\n for num, key in enumerate(sorted(conv_event, key=str)):\n segment = 
key.split(':')\n if segment[0] == \"event\":\n html.append(\"{}. <b>{}</b> [{} people]\".format(str(num), conv_event[\n key]['title'], len(conv_event[key]['participants'])))\n\n if len(html) == 0:\n yield from bot.coro_send_message(event.conv_id, '<i>No events available yet. Use <b>/event <eventname></b> to create your own event</i>')\n return\n # Generate the output list. Title first followed by the participants\n html.insert(0, _(\"<b>Current available events:</b>\"))\n message = _(\"<br />\".join(html))\n\n yield from bot.coro_send_message(event.conv_id, message)\n return", "def __init__(self, events):\n for event in events:\n #do stuff\n pass", "def runLoop(self, n):\n\t\t\n\t\tn = min(n, self.basic_header.GetEntries())\n\t\tprint \"Reading\", n, \"events\"\n\t\t\n\t\tfor i in range(n):\n\t\t\tfor module in self.modules:\n\t\t\t\tmodule.GetEntry(i)\n\t\t\tself.runEvent()", "def test_events(self):\n\n response = self.client.get(reverse('events'))\n\n assert response.status_code == 200", "def events(self):", "def test_page_existence(self):\r\n for page in self.pages:\r\n page.visit()", "def nextEvent(self, systemDate):\n while True:\n calendar_list = self.service.calendarList().list(pageToken=self.page_token).execute()\n #print(calendar_list) This will print the entire payload\n for calendar_list_entry in calendar_list['items']:\n if (calendar_list_entry['summary'] == 'Work'): # Check if the correct calender even exist in the calendar pool\n workFlag = True\n ID = (calendar_list_entry['id'])\n #print(ID)\n #print(\"The user calendar 'Work' exists\")\n self.page_token = calendar_list.get('nextPageToken')\n if not self.page_token:\n break\n\n # Call the Calendar API\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n events_result = self.service.events().list(calendarId=ID, timeMin=now,\n maxResults=20, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n\n if not events:\n #Uncomment for debugging\n #print(\"No upcoming events founds.\")\n self.recEvent = None\n for event in events:\n start = event['start'].get('dateTime') #Ex:2021-02-13T03:30:00-08:00 (date)T(Time-Timezone)\n splDate = start.split(\"T\")\n stime = splDate[1].split(\"-\")\n rstime = stime[0].rsplit(\":\", 1)\n end = event['end'].get('dateTime')\n end1= end.split(\"T\")\n endtime= end1[1].split(\"-\")\n rendtime = endtime[0].rsplit(\":\", 1)\n title = (event['summary'])\n print(title)\n #The first events of the same date will be added to the table\n if systemDate == splDate[0]:\n if (\"Meeting\") in title:\n print(\"Upcoming meeting:\", title)\n self.recEvent = {'Title' : title, 'Date': splDate[0], 'Start' : rstime[0], 'End' : rendtime[0]}\n break\n else:\n self.recEvent = None\n else:\n self.recEvent = None\n self.workFlag = False\n del now\n return (self.recEvent)", "def news_and_events(self):\n return self._get_child_page_of_type(NewsAndEventsPage)", "def visit_event(self, event):", "def __calender_events(self):\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n pt=\"Getting the upcoming latest events\"\n requests.get(\"http://localhost:8080/statement?text=%s\" % pt)\n self.speech.synthesize_text(pt)\n eventsResult = service.events().list(\n calendarId='primary', timeMin=now, maxResults=1, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', 
[])\n\n if not events:\n pq=\"No upcoming events found.\"\n requests.get(\"http://localhost:8080/statement?text=%s\" % pt)\n self.speech.synthesize_text(pq)\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n #start1=''.join(start)\n summary=event['summary']\n print start,summary\n requests.get(\"http://localhost:8080/statement?text=\"+start+\" \"+summary)", "def events_iter(self):\n for event_type in self._watchable_events:\n yield event_type", "def parse(self, response):\n meeting_types = ['admin-opp-committee-meeting', 'audit-committee', 'board-meeting']\n\n data = json.loads(response.text)\n for item in data:\n if (item.get('category') != [] and item.get('category')[0] in meeting_types):\n name, dt_time = self._parse_name_time(item['title'])\n start = self._parse_time_dict(self._parse_datetime(item['start']), dt_time)\n end = self._parse_time_dict(self._parse_datetime(item['end']), dt_time)\n end['date'] = start['date']\n if start['time'] == end['time']:\n end['time'] = None\n item_data = {\n '_type': 'event',\n 'name': name,\n 'description': item['description'],\n 'classification': self._parse_classification(item.get('category')[0]),\n 'start': start,\n 'end': end,\n 'all_day': False,\n 'timezone': self.timezone,\n 'sources': self._parse_sources(item)\n }\n item_data['status'] = self._generate_status(item_data)\n item_data['id'] = self._generate_id(item_data)\n\n # If it's a board meeting, return description\n if item['category'][0] in ['board-meeting', 'admin-opp-committee-meeting']:\n yield self._board_meeting(item_data)\n else:\n # Request each relevant event page,\n # including current data in meta attr\n req = scrapy.Request(\n item['url'],\n callback=self._parse_event,\n dont_filter=True,\n )\n req.meta['item'] = item_data\n yield req", "def parse_event(self, response):\n # gather rounds from the event page sidebar\n round_urls = response.css('div.navilevel3 p a::attr(href)').getall()\n round_titles = response.css('div.navilevel3 p a::text').getall()\n\n for round_url, round_title in zip(round_urls, round_titles):\n # check if this round has already been scraped in an early scraping run\n round_title = regex_replace(round_title)\n if not self.check_already_scraped(season_title=response.meta['season_title'],\n competition_title=response.meta['competition_title'],\n event_title=response.meta['event_title'],\n instance_of_event_in_competition=response.meta[\n 'instance_of_event_in_competition'],\n event_gender=response.meta['event_gender'],\n round_title=round_title):\n # assemble direct URL for the round\n full_round_url = response.urljoin(round_url)\n\n # pass along metadata for use in next steps\n round_details = parse_qs(urlsplit(full_round_url).query)\n response.meta.update(dict(round_title=round_title,\n round_id=round_details.get(\"ref\", np.nan)))\n\n # scrape the round page\n yield scrapy.Request(url=full_round_url,\n callback=self.parse_round,\n meta=response.meta)", "def list(request, template='events/list.html'):\n return render(request, template, {\n 'events': Event.objects.get_upcoming().order_by('start_date'),\n })", "def test_pagination(self):\n for page in range(1, 5):\n self._test_one_page(page=page)", "def handleEvents(self, events):\n pass", "def test_query_events_with_pagination(self):\n CommonTestCases.admin_token_assert_equal(\n self,\n query_events_with_pagination,\n event_query_with_pagination_response\n )", "def test_query_events_missing_page(self):\n CommonTestCases.admin_token_assert_in(\n self,\n 
query_events_per_page_without_page,\n \"page argument missing\"\n )", "def get_tagged_events():\n\n f = open('event_info.txt', 'w+')\n f.write('')\n f.close()\n\n for category in MEETUP_TAGS:\n events_added = 0\n days = 5\n while events_added < NUM_EVENTS:\n\n urls = set()\n\n today = datetime.date.today()\n tomorrow = today\n\n tomorrow = tomorrow + datetime.timedelta(days=days)\n\n # https://www.meetup.com/find/events/arts-culture/?allMeetups=false&radius=5&userFreeform=New+York%2C+NY&mcId=z10025&month=4&day=20&year=2018&eventFilter=all\n\n url = 'www.meetup.com/find/events/{}/?allMeetups=true&radius=20 \\\n &userFreeform=New+York%2C+NY&mcId=c10001&mcName=New+York%2C+NY \\\n &month={}&day={}&year={}'.format(category,\n tomorrow.month,\n tomorrow.day,\n tomorrow.year)\n\n r = requests.get('https://' + url)\n print('https://' + url)\n data = r.text\n soup = BeautifulSoup(data)\n\n for link in soup.find_all('a'):\n href = link.get('href')\n if '/events/' in href and '/find/' not in href:\n urls.add(href)\n\n if not urls:\n break\n\n for url in urls:\n os.system('python retrieval.py ' + url + ' ' + category)\n events_added += 1\n if events_added > NUM_EVENTS:\n break\n\n print('Finished ' + str(days))\n days += 1", "async def get_events(self) -> list[Event]:\n log.debug(\"Discovering events in branding repository.\")\n\n try:\n event_directories = await self.fetch_directory(\"events\", types=(\"dir\",)) # Skip files.\n except Exception:\n log.exception(\"Failed to fetch 'events' directory.\")\n return []\n\n instances: list[Event] = []\n\n for event_directory in event_directories.values():\n log.trace(f\"Attempting to construct event from directory: '{event_directory.path}'.\")\n try:\n instance = await self.construct_event(event_directory)\n except Exception as exc:\n log.warning(f\"Could not construct event '{event_directory.path}'.\", exc_info=exc)\n else:\n instances.append(instance)\n\n return instances", "def updateEvents(self):\n # Update calendar data\n d_start = datetime.datetime.today()\n d_end = d_start + datetime.timedelta(self.delta_days)\n results = self.cal_cal.date_search(d_start, d_end)\n\n # Flush the events dict\n self.events = []\n # Add each events\n for event in results:\n # Format the title of the event\n str_title = event.instance.vevent.summary.value\n if len(str_title) > 20:\n str_title = str_title[:17] + \"...\"\n # Format the date of the event\n vdate = event.instance.vevent.dtstart.value\n d = datetime.datetime.strptime(\n vdate.strftime(\"%d %m %Y\"), \"%d %m %Y\")\n str_date = \"%s %d %s\" % (\n self.days_french[d.weekday()],\n d.day,\n self.months_french[d.month -1])\n # Format the date gap\n gap = 1 + (d - d_start).days\n # Save the event\n self.events.append((str_title, str_date, gap))", "def test_upcoming_events(self, client, site, homepage, events):\n response = client.get(homepage.relative_url(site))\n\n # should have link to event list\n assertContains(response, reverse(\"events:upcoming\"))\n\n # only one event in context, since others already happened\n assert len(response.context[\"events\"]) == 1\n assert events[\"workshop\"] not in response.context[\"events\"]\n assert events[\"lecture\"] not in response.context[\"events\"]\n\n # shows event title, start/end time in local tz, and link to view\n est = zoneinfo.ZoneInfo(\"America/New_York\")\n assertContains(response, events[\"deadline\"].get_url())\n assertContains(response, events[\"deadline\"].title)\n assertContains(\n response,\n format(\n events[\"deadline\"].start_time.astimezone(est),\n \"F 
j\",\n ),\n )\n\n # shouldn't show if not published\n events[\"deadline\"].unpublish()\n response = client.get(homepage.relative_url(site))\n assert events[\"deadline\"] not in response.context[\"events\"]", "def __next_page(self):\n self.current_page = self.current_page + 1\n tree = ET.parse(urlopen(self.url + '&start=' + str(self.current_page)))\n self.iterator = tree.iterfind(self.GLOBAL_NP + 'entry')", "def __iter__(self):\n while self.has_next_page():\n response = self.get_next_page_response()\n for item in self.get_items_from_response(response):\n yield item", "async def _e_list(self, ctx):\n event_list = self.database.get_guild_events(ctx.guild.id)\n if len(event_list) == 0:\n await ctx.send(\"This server has no custom events\")\n return\n out = \"```\\nServer Events:\\n\"\n for event in event_list:\n out += f\"{event.name} - {event.period}: {event.text}\\n\"\n out += \"```\"\n await ctx.send(out)", "def get_events(query):\n pagination = EventModel.query.paginate(\n page=query['page'],\n per_page=query['per_page']\n )\n return {\n 'events': pagination.items,\n 'pagination': pagination_builder(pagination)\n }", "def on_page_changed(e):\n\n e.Skip()", "def get_all_events(cls):\n try:\n events = list(events_coll.find())\n events_list = []\n if events is not None:\n for event in events:\n one_event = cls(**event)\n events_list.append(one_event)\n return events_list\n except Exception as e:\n print(e)", "def parse(self, response):\n event_list_container = response.css(\"dl.simcal-events-list-container\")\n for event_date, event_details in zip(\n event_list_container.css(\"dt.simcal-day-label\"), event_list_container.css(\"dd.simcal-day\")\n ): \n #format of date is Monday, April 13th\n date = event_date.css(\".simcal-date-format::text\").get()\n for event_detail in event_details.css(\"li.simcal-event\"):\n artist_item = ArtistItem()\n performance_item = PerformanceItem()\n\n start_time = event_detail.css(\".simcal-event-start-time::text\").get()\n end_time = event_detail.css(\".simcal-event-end-time::text\").get()\n title = event_detail.css(\".simcal-event-title::text\").get()\n\n performance_item[\"title\"] = title\n performance_item[\"url\"] = self.start_urls[0]\n performance_item[\"start_datetime\"] = self.format_datetime(date, start_time)\n performance_item[\"end_datetime\"] = self.format_datetime(date, end_time)\n performance_item[\"url\"] = self.start_urls[0]\n \n artist_item[\"name\"] = title\n\n performance_item[\"artist\"] = dict(artist_item)\n performance_item[\"venue\"] = dict(heritage_item)\n yield performance_item", "def test_event_view_list(self):\n response = self.client.get('/module/event/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'frontend/appointment/event/list.html')\n\n request = self.factory.get('/module/calendar/')\n request.user = self.user\n request.session = {}\n response = event_list(request)\n self.assertEqual(response.status_code, 200)", "def events(self, p=_P, start=0, end=0, limit=0, skip=0, page=1,\n category='all', context='all'):\n\n self._valdiate_param(start=start, end=end,\n limit=limit, skip=skip, page=page)\n \n q = {\"itype\": \"event\", **dtresolve(start, end)}\n\n if category != \"all\":\n self._validate_param(category=category)\n q['category'] = category\n\n if context != \"all\":\n self._validate_param(context=context)\n switch = {'all': '_ref', 'benign': '_ben_ref',\n 'malicious': '_mal_ref', 'unknown': '_ref'}\n q[switch.get(context)] = self._hash\n if context == 'unknown':\n q['_ben_ref'] = {'$ne': 
self._hash}\n q['_mal_ref'] = {'$ne': self._hash}\n \n return self._backend.find(q, p, **limitskip(limit, skip, page))", "def events(self):\n return self.search(comp_class=Event)", "def read_events():\n service = setup_google_calendar()\n dict = {}\n # Call the Calendar API\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 10 events')\n events_result = service.events().list(calendarId='primary', timeMin=now,\n maxResults=10, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n\n if not events:\n print('No upcoming events found.')\n i = 0\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])\n dict[i] = (start, event['summary'])\n i += 1\n return dict", "def _PrintAllEventsOnDefaultCalendar(self):\n\n feed = self.cal_client.GetCalendarEventFeed()\n print 'Events on Primary Calendar: %s' % (feed.title.text,)\n for i, an_event in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. %s' % (i, an_event.title.text,)\n for p, a_participant in zip(xrange(len(an_event.who)), an_event.who):\n print '\\t\\t%s. %s' % (p, a_participant.email,)\n print '\\t\\t\\t%s' % (a_participant.value,)\n if a_participant.attendee_status:\n print '\\t\\t\\t%s' % (a_participant.attendee_status.value,)", "def loop_pages(self, response):\n\n current_page = response.xpath(\"//a[@class='currentPage ']/text()\")\n print(\"current page: {0}\".format(current_page.extract_first()))\n\n next_page_link = response.xpath(\"//a[@class='text' and contains(., 'Next')]\")\n next_page_link = next_page_link.xpath('@href').extract_first()\n\n # urls_stories is a tuple with a url, and a corresponding Story object\n urls_stories = self.get_thread_urls(response)\n\n if self.generate_test is None:\n # generate requests for -- new -- stories\n for (url, story) in urls_stories:\n yield scrapy.Request(url, callback=self.scan_thread, priority=1, meta={\"story_item\": story})\n\n # generate requests for stories that need to be updated.\n for (url, story) in self.update_list:\n yield scrapy.Request(url, callback=self.update_stories, priority=2, meta={\"story_item\": story})\n\n if next_page_link is not None:\n\n # print(\"next page link: {0}\".format(next_page_link))\n next_page_link = response.urljoin(next_page_link)\n yield scrapy.Request(next_page_link, callback=self.loop_pages, priority=0)\n else:\n \"\"\"\n This section activates if self.generate_test is not None.\n A thread url is required to be provided to generate a test scenario out of that\n thread.\n It scans the site looking for this thread, and scrapes it.\n If it doesn't find it, it scans the next page.\n \"\"\"\n print(\"\\n\\tGENERATING TEST SCENARIO\\n\")\n for (url, story) in urls_stories:\n if url == self.test_url:\n yield scrapy.Request(url, callback=self.scan_thread, priority=0, meta={\"story_item\": story})\n return\n\n for (url, story) in self.update_list:\n if url == self.test_url:\n yield scrapy.Request(url, callback=self.scan_thread, priority=0, meta={\"story_item\": story})\n return\n\n next_page_link = response.urljoin(next_page_link)\n yield scrapy.Request(next_page_link, callback=self.loop_pages, priority=0)", "def render_all(pages):\n for page in pages:\n render_template(page['template'], page['output'], page['values'])", "def iteratePageItems(self, page, func=dict):\n\n for item in page.items:\n yield func(**item)\n\n if page.nextPageUrl:\n res = self.getRequest(page.nextPageUrl)\n nextPage 
= vsdModels.Pagination(**res)\n for nextItem in self.iteratePageItems(nextPage, func=func):\n yield nextItem", "def endpoints(self):\n\n # Yields the home page\n gui_uri = self.app.config.GUI_PX\n yield self.page(MiyagiAppHome, gui_uri)\n\n # Yields the process list page\n processes_uri = f'{gui_uri}{self.app.config.PROCESSES_PX}'\n yield self.page(ProcessesPage, processes_uri)\n\n for p_name, process in self.app.processes.items():\n # For every process yields the relative general page\n process_uri = f'{processes_uri}/{p_name}'\n yield self.page(\n ProcessPage,\n process_uri,\n process=process\n )\n for obj in process.objects:\n # For every object in the process yields the relative page\n # List of instances + general object actions\n object_uri = f'{process_uri}{self.app.config.OBJECTS_PX}/{obj.name.lower()}'\n yield self.page(\n ObjectPage,\n object_uri,\n handler='generic_handler',\n methods=['GET', ],\n process=process,\n obj=obj\n )\n\n # For every object in the process yields the object creation form\n yield self.page(\n ObjectEditPage,\n f'{object_uri}/<uid>',\n handler='create_modify_object_handler',\n methods=['GET', 'POST'],\n process=process,\n obj=obj\n )\n # TODO: object remove endpoint\n\n # TODO: object actions endpoints\n # Object class methods\n\n # TODO: process actions endopoints\n\n # TODO: System endpoints and controllers", "def iter_page_links(self) -> Iterable[str]:\n base_url = 'https://health.mil/About-MHS/OASDHA/Defense-Health-Agency/Resources-and-Management/DHA-Publications'\n yield base_url", "def events(self):\r\n return resources.Events(self)", "def fetch_events(self):\n while 1:\n try:\n self.events_local.append(self._q.get(False))\n except queue.Empty:\n break", "def view_event_list(request, **kwargs):\n #lu = get_common_lookup(request)\n lu = { 'page_title' : 'MCB Event Tweets'\\\n , 'IS_TWEET_EVENT_PAGE' : True\n , 'TWEET_SUCCESS' : kwargs.get('success_msg', False)\n }\n \n if not request.user.is_authenticated():\n return HttpResponse('not logged in')\n \n if not is_user_in_group(request, TWEET_GROUP_NAME):\n return HttpResponse('not in tweet group')\n \n upcoming_events = MCBTweetEvent.get_events_awaiting_approval()\n \n lu.update({ 'upcoming_events' : upcoming_events\\\n #, 'my_checked_codes' : get_previously_checked_expense_codes(request)\\\n })\n #\n return render_to_response('tweet/events/event_list.html', lu, context_instance=RequestContext(request))", "def loadEvents(self, eventlist):\n for event in eventlist:\n event.ID = self.loadEvent(event.name, event.pid)", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 10 events')\n eventsResult = service.events().list(\n calendarId='giftee.co_2d3935303830373930333936@resource.calendar.google.com', timeMin=now, maxResults=10, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n\n\n # TODO noitem found\n print(datetime.datetime.strptime(events[0]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00'))\n\n nextStartTime = datetime.datetime.strptime(events[0]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00')\n delta = (nextStartTime - datetime.datetime.now()).total_seconds()\n\n if delta < 0:\n print(\"capture next\")\n nextStartTime = datetime.datetime.strptime(events[1]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00')\n delta = (nextStartTime - 
datetime.datetime.now()).total_seconds()\n\n print(delta)\n\n if NOTIFY_THRESHOLD_SECOND > delta:\n alert_time_limit()\n else:\n set_normal()\n\n\n\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])", "def yield_chunked_events(self, events):\n for i in range(0, len(events), 5000):\n yield events[i:i + 5000]", "def get_data(self):\n has_next_page = True\n page = 1\n while has_next_page:\n print(f'Getting page {page}')\n response = self.get_articles(\n page=page,\n size=200,\n order_by='extracted_at',\n order_type='asc'\n )\n pagination = response.get('pagination')\n has_next_page = pagination.get('has_next')\n self.save_articles(response.get('articles'))\n page += 1\n time.sleep(2.5)", "def show_events(request):\n event_list = Event.objects.order_by('-date')\n\n event_form = EventForm()\n\n context = {'events': event_list, 'form': event_form}\n return render(request, 'metro_app/events_view.html', context)", "def print_event_list(event_list, my_file):\n\tlogger.debug(\"Outputting list of events to page...\")\n\tlogger.debug(\"List:\\n\" + str(event_list))\n\tfor event in event_list:\n\t\tprint >> my_file, \"\"\"<TR>\"\"\"\n\t\tprint_event(event, my_file)\n\t\tprint >> my_file, \"\"\"</TR>\"\"\"", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError" ]
[ "0.7203284", "0.6670896", "0.66397905", "0.65484786", "0.65141577", "0.64590824", "0.64310527", "0.6378027", "0.6360895", "0.63342434", "0.6316285", "0.6311675", "0.6298735", "0.6272081", "0.6216796", "0.61802506", "0.61441034", "0.6130424", "0.6043177", "0.60396385", "0.6032347", "0.6019611", "0.6004777", "0.5999012", "0.599123", "0.59719354", "0.5967197", "0.59616375", "0.5939878", "0.59361994", "0.5935849", "0.5928883", "0.5923571", "0.5923571", "0.59116995", "0.5906134", "0.5903273", "0.59028494", "0.5896619", "0.5869062", "0.5846928", "0.58438414", "0.5839228", "0.5821847", "0.58197635", "0.5813647", "0.58035564", "0.57842404", "0.57759434", "0.57586235", "0.575622", "0.57444084", "0.5736881", "0.5721976", "0.5710976", "0.5710184", "0.56976366", "0.5683372", "0.5654597", "0.5637714", "0.563366", "0.56312954", "0.5629878", "0.56228346", "0.5622708", "0.5594909", "0.5590765", "0.55867356", "0.55723196", "0.5567434", "0.55646646", "0.5560183", "0.5559329", "0.5558029", "0.55472386", "0.55376273", "0.5533775", "0.55255765", "0.55198485", "0.5513931", "0.5512354", "0.55080986", "0.55057865", "0.54935145", "0.54923016", "0.5489057", "0.54842263", "0.5469029", "0.54668605", "0.54598033", "0.54565734", "0.5448818", "0.54478043", "0.54323894", "0.54300445", "0.54262656", "0.5419675", "0.5415687", "0.5403071", "0.5403071" ]
0.7202085
1
Setup the Binary Sensor platform for EnOcean.
def setup_platform(hass, config, add_devices, discovery_info=None):
    dev_id = config.get(CONF_ID, None)
    devname = config.get(CONF_NAME, "EnOcean binary sensor")
    add_devices([EnOceanBinarySensor(dev_id, devname)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n if discovery_info is None:\n return\n binary_sensors = []\n for name in hass.data[DOMAIN]:\n if name in BINARY_SENSORS:\n binary_sensors.append(NextcloudBinarySensor(name))\n add_entities(binary_sensors, True)", "def setUp(self):\n self.platform = wirelesstagpy.WirelessTags(username=USERNAME, password=PASSWORD)\n self.tag_outdoor = wirelesstagpy.SensorTag(MOCK.OUTDOOR_PROBE, self.platform)\n self.platform._tags[\"fake-1\"] = self.tag_outdoor # pylint: disable=protected-access", "def setUp(self):\n self.ser = Serial()\n self.device_obj = ZBSensor(self.ser)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n if discovery_info is None:\n return\n\n data = hass.data[LUPUSEC_DOMAIN]\n\n device_types = [CONST.TYPE_OPENING]\n\n devices = []\n for device in data.lupusec.get_devices(generic_type=device_types):\n devices.append(LupusecBinarySensor(data, device))\n\n add_entities(devices)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n _LOGGER.debug(\"Setting up\")\n\n mon = Monitor(hass, mac, name)\n add_entities([SkybeaconTemp(name, mon)])\n add_entities([SkybeaconHumid(name, mon)])\n\n def monitor_stop(_service_or_event):\n \"\"\"Stop the monitor thread.\"\"\"\n _LOGGER.info(\"Stopping monitor for %s\", name)\n mon.terminate()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)\n mon.start()", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n host = config[CONF_HOST]\n monitored_variables = config[CONF_MONITORED_VARIABLES]\n\n charger = openevsewifi.Charger(host)\n\n entities = [\n OpenEVSESensor(charger, description)\n for description in SENSOR_TYPES\n if description.key in monitored_variables\n ]\n\n add_entities(entities, True)", "def setUp(self):\n self.sensor = Sensor('127.1.1.3', 9000)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config.get(CONF_NAME)\n host = config[CONF_HOST]\n port = config[CONF_PORT]\n username = config[CONF_USERNAME]\n password = config[CONF_PASSWORD]\n\n customize = config[CONF_CUSTOMIZE]\n\n protocol = \"https\" if config[CONF_SSL] else \"http\"\n\n url = f\"{protocol}://{host}\"\n\n data = HikvisionData(hass, url, port, name, username, password)\n\n if data.sensors is None:\n _LOGGER.error(\"Hikvision event stream has no data, unable to set up\")\n return\n\n entities = []\n\n for sensor, channel_list in data.sensors.items():\n for channel in channel_list:\n # Build sensor name, then parse customize config.\n if data.type == \"NVR\":\n sensor_name = f\"{sensor.replace(' ', '_')}_{channel[1]}\"\n else:\n sensor_name = sensor.replace(\" \", \"_\")\n\n custom = customize.get(sensor_name.lower(), {})\n ignore = custom.get(CONF_IGNORED)\n delay = custom.get(CONF_DELAY)\n\n _LOGGER.debug(\n \"Entity: %s - %s, Options - Ignore: %s, Delay: %s\",\n data.name,\n sensor_name,\n ignore,\n delay,\n )\n if not ignore:\n entities.append(\n 
HikvisionBinarySensor(hass, sensor, channel[1], data, delay)\n )\n\n add_entities(entities)", "def setup_sensors(self):\n super(EddRoach2ProductController, self).setup_sensors()\n self._firmware_server_sensor = Sensor.string(\n \"firmware-server\",\n description=\"The address of the firmware server started by this product\",\n default=\"\",\n initial_status=Sensor.UNKNOWN)\n self.add_sensor(self._firmware_server_sensor)\n self._parent.mass_inform(Message.inform('interface-changed'))", "def setUp(self):\n import protolibs.ics_servers as ics_servers\n from point import Point\n from configobj import ConfigObj\n\n # Get config file\n configfile = '/'.join(['sims', 'rtutank', 'config'])\n config=ConfigObj(infile=configfile, unrepr=True)\n self.config = config\n #Set global variable devconfig here \n devconfig=config['vdevs']['slave'] \n\n ##--Set up points\n points={}\n for p in devconfig['points']:\n points.update( { p['name'] : Point(**p) } ) \n #The ** treats the p dictionary as the arguments to the Point class\n self.server = ics_servers.ModbusRTU(devconfig['icsifaces'][0], points.values())\n self.server.start()", "def configure(self):\n\n self.platform.configure()", "def setup_platform(hass, config, add_entities, discovery_info=None):\n if discovery_info is None:\n return\n\n devices = []\n for vin, datastore in hass.data[DATA_LEAF].items():\n _LOGGER.debug(\"Adding binary_sensors for vin=%s\", vin)\n devices.append(LeafPluggedInSensor(datastore))\n devices.append(LeafChargingSensor(datastore))\n\n add_entities(devices, True)", "def __init__(self, parent, endpoint):\n Wemo_Endpoint.__init__(self, parent, endpoint)\n self.device_type = self._Parent._DeviceTypes.get('wemo_binary_sensor')\n self.FEATURES.update({\n FEATURE_BRIGHTNESS: False,\n FEATURE_PERCENT: False,\n FEATURE_NUMBER_OF_STEPS: False\n })", "def setup_platform(hass, config, add_devices, discovery_info=None):\n hub.update()\n\n for vacbot in hub.vacbots:\n add_devices([DeebotMopAttachedBinarySensor(vacbot, \"mop_attached\")], True)", "async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n binary_sensors = []\n invert_logic = config[CONF_INVERT_LOGIC]\n pin_mode = config[CONF_PIN_MODE]\n ports = config[CONF_PORTS]\n\n setup_mode(pin_mode)\n\n for port_num, port_name in ports.items():\n binary_sensors.append(\n OPiGPIOBinarySensor(hass, port_name, port_num, invert_logic)\n )\n async_add_entities(binary_sensors)", "def setup_platform(hass, config, add_devices, discovery_info=None):\r\n pull_mode = config[CONF_PULL_MODE]\r\n invert_logic = config[CONF_INVERT_LOGIC]\r\n\r\n iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)\r\n\r\n binary_sensors = []\r\n pins = config[CONF_PINS]\r\n\r\n for pin_num, pin_name in pins.items():\r\n binary_sensors.append(abelectronicsiopiBinarySensor(pin_name, pin_num, pull_mode, invert_logic, iopi))\r\n add_devices(binary_sensors, True)", "def setUp(self):\n import protolibs.ics_servers as ics_servers\n from point import Point\n from configobj import ConfigObj\n\n # Get config file\n configfile = '/'.join(['sims', 'tcptank', 'config'])\n config=ConfigObj(infile=configfile, unrepr=True)\n self.config = config\n #Set global variable devconfig here \n devconfig=config['vdevs']['slave'] \n\n ##--Set up points\n points={}\n for p in devconfig['points']:\n points.update( { p['name'] : Point(**p) } ) \n #The ** treats the p dictionary as the arguments to the Point class\n self.server = ics_servers.ModbusTCP( devconfig['icsifaces'][0], points.values() )\n self.server.start()", 
"def _init_hardware(self):\n return", "def startup( self ):\n # ---- Setup UPNPC ----\n if self.config.neuron.use_upnpc:\n bittensor.logging.success(prefix = 'Set upnpc', sufix = '<green>ON</green>')\n try:\n self.external_port = net.upnpc_create_port_map( port = self.axon.port )\n except net.UPNPCException as upnpc_exception:\n logger.critical('Failed to hole-punch with upnpc')\n raise RuntimeError('Failed to hole-punch with upnpc')\n else:\n bittensor.logging.success(prefix = 'Set upnpc', sufix = '<red>OFF</red>')\n self.external_port = self.config.axon.port\n\n # ---- Get external ip ----\n try:\n self.external_ip = net.get_external_ip()\n bittensor.logging.success(prefix = 'External IP', sufix = '<blue>{}</blue>'.format(self.external_ip))\n except net.ExternalIPNotFound as external_port_exception:\n raise RuntimeError('Unable to attain your external ip. Check your internet connection. error:{}', external_port_exception)\n\n # ---- Setup tensorboard ----\n if self.config.neuron.use_tensorboard == True:\n self._tensorboard_program = program.TensorBoard()\n self._tensorboard_program.configure(argv=[None, '--logdir', self.config.neuron.full_path, '--load_fast=true'])\n self._tensorbaord_url = self._tensorboard_program.launch()\n bittensor.logging.success(prefix = 'Set tensorboard', sufix = '<blue>http://localhost:6006/</blue>')\n else: bittensor.logging.success(prefix = 'Set tensorboard', sufix = '<red>OFF</red>')\n\n # ---- Setup Wallet. ----\n if not self.wallet.has_coldkeypub:\n self.wallet.create_new_coldkey( n_words = 12, use_password = True )\n if not self.wallet.has_coldkeypub:\n raise RuntimeError('Miner must have access to a decrypted coldkeypub')\n if not self.wallet.has_hotkey:\n self.wallet.create_new_hotkey( n_words = 12, use_password = False )\n if not self.wallet.has_hotkey:\n raise RuntimeError('Miner must have access to a decrypted hotkey')\n\n # ---- Subscribe to chain ----\n subscribe_success = self.subtensor.subscribe(\n wallet = self.wallet,\n ip = self.external_ip,\n port = self.external_port,\n modality = bittensor.proto.Modality.TEXT,\n wait_for_finalization = True,\n timeout = 4 * bittensor.__blocktime__,\n )\n if not subscribe_success:\n raise RuntimeError('Failed to subscribe neuron.')\n\n # ---- Starting axon ----\n self.axon.start()", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n host: str = config[CONF_HOST]\n port: int = config[CONF_PORT]\n name: str = config[CONF_NAME]\n url = f\"http://{host}:{port}/api/LiveData.xml\"\n\n gateway = Ted5000Gateway(url)\n\n # Get MUT information to create the sensors.\n gateway.update()\n\n entities = []\n for mtu in gateway.data:\n for description in SENSORS:\n entities.append(Ted5000Sensor(gateway, name, mtu, description))\n\n add_entities(entities)", "def setup(self):\n\n self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)", "def setUpEnv(self):\n \n robot = Robot('atrv')\n\n pose = Sensor('pose')\n robot.append(pose)\n pose.configure_mw('yarp')\n\n motion = Actuator('v_omega')\n robot.append(motion)\n motion.configure_mw('yarp')\n \n env = Environment('indoors-1/indoor-1')\n env.configure_service('socket')", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n 
with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def __init__(self):\n self.hw = dev_hwinfo.device()\n self.ethKey=\"Ethernet\"\n self.ethAllInterfaceName=[]\n dir_path = os.path.dirname(os.path.realpath(__file__))\n self.myDefine = init_define.main()\n self.mPlatform=self.hw.getPlatform()", "def setup(hass, base_config):\n from pyhusmow import API as HUSMOW_API\n\n config = base_config.get(DOMAIN)\n\n if hass.data.get(DOMAIN) is None:\n hass.data[DOMAIN] = { 'devices': [] }\n\n api = HUSMOW_API()\n api.login(config.get(CONF_USERNAME), config.get(CONF_PASSWORD))\n\n robots = api.list_robots()\n\n if not robots:\n return False\n\n for robot in robots:\n hass.data[DOMAIN]['devices'].append(AutomowerDevice(robot, api))\n\n for component in AUTOMOWER_COMPONENTS:\n discovery.load_platform(hass, component, DOMAIN, {}, base_config)\n\n return True", "def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config[CONF_NAME]\n host = config[CONF_HOST]\n entity = OppleLight(name, host)\n\n add_entities([entity])\n\n _LOGGER.debug(\"Init light %s %s\", host, entity.unique_id)", "def initialize_electronics(self):\n\n self.electronics = ArduinoModel(**self.config['electronics']['arduino'])\n self.logger.info('Initializing electronics arduino')\n self.electronics.initialize()", "def test_version_sensor(self):\n config = {\"sensor\": {\"platform\": \"version\"}}\n\n assert setup_component(self.opp, \"sensor\", config)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None\n) -> None:\n # Assign configuration variables.\n # The configuration check takes care they are present.\n host = config[CONF_HOST]\n username = config[CONF_USERNAME]\n password = config.get(CONF_PASSWORD)\n\n # Setup connection with devices/cloud\n hub = awesomelights.Hub(host, username, password)\n\n # Verify that passed in configuration works\n if not hub.is_valid_login():\n _LOGGER.error(\"Could not connect to AwesomeLight hub\")\n return\n\n # Add devices\n add_entities(AwesomeLight(light) for light in hub.lights())", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> 
None:\n\n host = config[CONF_HOST]\n port = config[CONF_PORT]\n token = config.get(CONF_ACCESS_TOKEN)\n\n client = ClementineRemote(host, port, token, reconnect=True)\n\n add_entities([ClementineDevice(client, config[CONF_NAME])])", "def __init__(self):\n\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.color_sensor = ev3.ColorSensor()\n self.ir_sensor = ev3.InfraredSensor()\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n\n assert self.left_motor.connected\n assert self.right_motor.connected\n assert self.arm_motor.connected\n assert self.touch_sensor\n assert self.color_sensor\n assert self.ir_sensor\n assert self.pixy", "def setup_platform(hass, config, add_devices_callback, discovery_info=None):\n host = config.get(CONF_HOST)\n name = config.get(CONF_NAME)\n token = config.get('token')\n\n add_devices_callback([MiroboSwitch(name, host, token)])", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n bt_device_id: int = config[CONF_BT_DEVICE_ID]\n\n beacons: dict[str, dict[str, str]] = config[CONF_BEACONS]\n devices: list[EddystoneTemp] = []\n\n for dev_name, properties in beacons.items():\n namespace = get_from_conf(properties, CONF_NAMESPACE, 20)\n instance = get_from_conf(properties, CONF_INSTANCE, 12)\n name = properties.get(CONF_NAME, dev_name)\n\n if instance is None or namespace is None:\n _LOGGER.error(\"Skipping %s\", dev_name)\n continue\n\n devices.append(EddystoneTemp(name, namespace, instance))\n\n if devices:\n mon = Monitor(hass, devices, bt_device_id)\n\n def monitor_stop(event: Event) -> None:\n \"\"\"Stop the monitor thread.\"\"\"\n _LOGGER.info(\"Stopping scanner for Eddystone beacons\")\n mon.stop()\n\n def monitor_start(event: Event) -> None:\n \"\"\"Start the monitor thread.\"\"\"\n _LOGGER.info(\"Starting scanner for Eddystone beacons\")\n mon.start()\n\n add_entities(devices)\n mon.start()\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)\n hass.bus.listen_once(EVENT_HOMEASSISTANT_START, monitor_start)\n else:\n _LOGGER.warning(\"No devices were added\")", "def test_setup_platform(self, store_mock):\n config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n with assert_setup_component(1, ip.DOMAIN):\n setup_component(self.hass, ip.DOMAIN, config)\n self.hass.block_till_done()\n\n assert self.hass.states.get(\"image_processing.microsoftface_demo_camera\")", "def __init__(self):\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.color_sensor = ev3.ColorSensor()\n self.ir_sensor = ev3.InfraredSensor()\n self.MAX_SPEED = 900\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.left_motor.connected\n assert self.right_motor.connected\n assert self.arm_motor.connected\n assert self.touch_sensor\n assert self.color_sensor\n assert self.ir_sensor\n assert self.pixy", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/collective/demo.plone.de.git', user=env.deploy_user) # noqa: E501\n\n 
with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n if env.latest:\n if env.python3:\n sudo('ln -s local_demo_nightly_py3.cfg local.cfg', user=env.deploy_user) # noqa: E501\n else:\n sudo('ln -s local_demo_nightly_py2.cfg local.cfg', user=env.deploy_user) # noqa: E501\n else:\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n if env.latest:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/plone/buildout.coredev/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n else:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/starzel/buildout/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def setUp(self):\n self.os = \"debian\"", "def platform_start(self):\n self.platform.start()", "def setup_platform(hass, config, add_entities, discovery_info=None):\n bloomskystorm = hass.components.bloomskystorm\n # Default needed in case of discovery\n sensors = config.get(CONF_MONITORED_CONDITIONS, SENSOR_TYPES)\n\n for device in bloomskystorm.BLOOMSKYSTORM.devices.values():\n for variable in sensors:\n add_entities(\n [BloomSkyStormSensor(bloomskystorm.BLOOMSKYSTORM, device, variable)], True)", "def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255", "def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255", "def setup_platform(hass, config, add_devices, discovery_info=None):\n from pybotvac import Account\n\n try:\n auth = Account(config[CONF_USERNAME], config[CONF_PASSWORD])\n except HTTPError:\n _LOGGER.error(\"Unable to connect to Neato API\")\n return False\n\n dev = []\n for robot in auth.robots:\n for type_name in SWITCH_TYPES:\n dev.append(NeatoConnectedSwitch(robot, type_name))\n add_devices(dev)", "def elinos_init():\n elinos_env = get_elinos_environment()\n\n solib_dirs = []\n\n # System libraries\n if None in (elinos_env[key] for key in (\"cdk\", \"target\")):\n warn(\"ELinOS system libraries will not be loaded\")\n else:\n solib_prefix = \"%s/%s\" % (elinos_env[\"cdk\"], elinos_env[\"target\"])\n solib_dirs += [\"%s/%s\" % (solib_prefix, \"lib\")]\n gdb.execute(\"set solib-absolute-prefix %s\" % solib_prefix)\n\n # Xenomai libraries. 
Those are optional, so have a lighter warning\n # if they cannot be located.\n if elinos_env[\"project\"] is None:\n warn(\"Xenomai libraries may not be loaded\")\n else:\n for dir in elinos_env['xenomai']:\n solib_dirs += [\"%s/%s\"\n % (dir, \"xenomai-build/usr/realtime/lib\")]\n\n if len(solib_dirs) != 0:\n gdb.execute(\"set solib-search-path %s\" % \":\".join(solib_dirs))", "def setupHw():\n\n pin.setupHw()\n pin.setupOutPins(traffic_lights)\n pin.setDebug(False)", "def setup_method(self):\n self.hass = get_test_home_assistant()\n\n self.config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\", \"name\": \"test local\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n self.endpoint_url = f\"https://westus.{mf.FACE_API_URL}\"", "def init():\n\n global leftDriverStick\n global rightDriverStick\n global goGamePad\n\n try:\n leftDriverStick = T16000M(0)\n except:\n print('OI: Error - Could not instantiate Left Driver Stick on USB port 0!!!')\n\n try:\n rightDriverStick = T16000M(1)\n except:\n print('OI: Error - Could not instantiate Right Driver Stick on USB port 0!!!')\n\n try:\n goGamePad = Joystick(2)\n except:\n print('OI: Error - Could not instantiate Right Driver Stick on USB port 2!!!')\n\n\n # ----------------------------------------------------------\n # Driver Controls\n # ----------------------------------------------------------\n #global resetYawBtn\n #resetYawBtn = JoystickButton(rightDriverStick, config.btnResetYawAngleIndex)\n #resetYawBtn.whenPressed(NavxResetYawAngle())\n\n global btnDriveSlow\n btnDriveSlow = JoystickButton(leftDriverStick, config.btnDriveSlow)\n \n global btnEnableLightSensor\n btnEnableLightSensor = JoystickButton(leftDriverStick, config.btnEnableLightSensorIndex)\n\n global btnExtendAll\n btnExtendAll = JoystickButton(rightDriverStick, config.btnExtendAllIndex)\n btnExtendAll.whenPressed(ExtendAll())\n\n global btnRetract\n btnRetract = JoystickButton(rightDriverStick, config.btnRetractAllIndex)\n btnRetract.whenPressed(RetractAll())\n\n global btnExtendFront\n btnExtendFront = JoystickButton(rightDriverStick, config.btnExtendFrontIndex)\n btnExtendFront.whenPressed(ExtendFront())\n\n global btnExtendBack\n btnExtendBack = JoystickButton(rightDriverStick, config.btnExtendBackIndex)\n btnExtendBack.whenPressed(ExtendBack())\n\n global btnRetractFront\n btnRetractFront = JoystickButton(rightDriverStick, config.btnRetractFrontIndex)\n btnRetractFront.whenPressed(RetractFront())\n\n global btnCargoGrabTog\n btnCargoGrabTog = JoystickButton(goGamePad, config.btnHatchGrabTogIndex)\n btnCargoGrabTog.whenPressed(ExtendBack())\n \n \"\"\"\n global btnResetEncoders\n btnResetEncoders = JoystickButton(leftDriverStick, config.btnResetEncodersIndex)\n btnResetEncoders.whenPressed(TankDriveResetEncoders())\n \"\"\"\n\n \"\"\"\n global axisElevator\n axisElevator = JoystickAxis(goGamePad, config.axisElevatorIndex)\n axisElevator. #??? 
idk how to configure joystick axis\n \"\"\"\n\n \"\"\"\n global btnRampTog\n btnRampTog = JoystickButton(goGamePad, config.btnRampTogIndex)\n btnRampTog.whenPressed(ExtendFront())\n \"\"\"\n #global btnResetEncoders\n #btnResetEncoders = JoystickButton(leftDriverStick, config.btnResetEncodersIndex)\n #btnResetEncoders.whenPressed(TankDriveResetEncoders())\n\n # These variable names are inconsistent, need to be fixed!!!!\n #global btnRampExtendTog\n #btnRampExtendTog = JoystickButton(goGamePad, config.btnRampExtendTogIndex)\n #btnRampExtendTog.whenPressed(RampExtend())\n\n #global btnRampRetractTog\n #btnRampRetractTog = JoystickButton(goGamePad, config.btnRampRetractTogIndex)\n #btnRampRetractTog.whenPressed(RampRetract())", "def setup_platform(hass, config, add_entities, discovery_info=None):\n now = date.today()\n icon = config[CONF_ICON]\n poubelle_paire = config[CONF_JOUR_PAIRE]\n poubelle_impaire = config[CONF_JOUR_IMPAIRE]\n jour = config[CONF_JOUR]\n devices = [PoubelleSensor('Poubelle de la semaine', poubelle_paire,poubelle_impaire,jour,icon)]\n add_entities(devices, True)", "def setup_platform(hass, config, add_devices, discovery_info=None):\n from evohomeclient import EvohomeClient\n\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n if username is None or password is None:\n _LOGGER.error(\"Missing required configuration items %s or %s\",\n CONF_USERNAME, CONF_PASSWORD)\n return False\n\n evo_api = EvohomeClient(username, password)\n try:\n add_devices([RoundThermostat(evo_api)])\n except socket.error:\n _LOGGER.error(\n \"Connection error logging into the honeywell evohome web service\"\n )\n return False", "async def async_setup_entry(\n hass: HomeAssistantType, entry: ConfigEntry, async_add_entities\n) -> None:\n _LOGGER.info(\"Set up AstroWeather binary sensor platform\")\n\n fcst_coordinator = hass.data[DOMAIN][entry.entry_id][\"fcst_coordinator\"]\n if not fcst_coordinator.data:\n return False\n\n coordinator = hass.data[DOMAIN][entry.entry_id][\"coordinator\"]\n if not coordinator.data:\n return False\n\n astroweather = hass.data[DOMAIN][entry.entry_id][\"aw\"]\n if not astroweather:\n return False\n\n sensors = []\n for sensor in SENSOR_TYPES:\n sensors.append(\n AstroWeatherBinarySensor(coordinator, entry.data, sensor, fcst_coordinator)\n )\n\n async_add_entities(sensors, True)\n return True", "def __init__(self):\n self.inches_moved = 0\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.running = True\n self.ir_sensor = ev3.InfraredSensor()\n self.color_sensor = ev3.ColorSensor()\n assert self.color_sensor\n assert self.ir_sensor\n assert self.touch_sensor\n self.arm_motor.position = 0\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.pixy\n\n self.right_motor_encoder = self.right_motor.position\n self.left_motor_encoder = self.left_motor.position", "def __init__(self):\n self.charm_config = hookenv.config()\n self.kv = unitdata.kv()\n if not self.synapse_signing_key_file:\n self.synapse_signing_key_file = \"{}/{}.signing.key\".format(\n self.synapse_conf_dir, self.get_server_name()\n )", "def setup(hass, config):\n global HASS_PLATFORM\n socketserver.TCPServer.allow_reuse_address = True\n HASS_PLATFORM = hass\n\n HASS_PLATFORM.data[DOMAIN] = {}\n\n port = int(config[DOMAIN][CONF_PORT])\n\n for hub_config in config[DOMAIN][CONF_HUBS]:\n hass.data[DOMAIN][hub_config[CONF_ACCOUNT]] = 
Hub(hass, hub_config)\n\n for component in [\"binary_sensor\", \"alarm_control_panel\", \"sensor\"]:\n discovery.load_platform(hass, component, DOMAIN, {}, config)\n\n for hub in HASS_PLATFORM.data[DOMAIN].values():\n for sensor in hub._states.values():\n sensor.async_schedule_update_ha_state()\n\n server = socketserver.TCPServer((\"\", port), AlarmTCPHandler)\n\n server_thread = threading.Thread(target=server.serve_forever)\n server_thread.start()\n\n return True", "async def setup_comp(opp):\n opp.config.units = IMPERIAL_SYSTEM\n assert await async_setup_component(\n opp, water_heater.DOMAIN, {\"water_heater\": {\"platform\": \"demo\"}}\n )\n await opp.async_block_till_done()", "def __init__(self):\n self.server_name = 'Binary Light Device'\n self.device = None", "def setup_platform(hass, config, add_entities, discovery_info=None):\n try:\n envirophat = importlib.import_module(\"envirophat\")\n except OSError:\n _LOGGER.error(\"No Enviro pHAT was found\")\n return False\n\n data = EnvirophatData(envirophat, config.get(CONF_USE_LEDS))\n\n display_options = config[CONF_DISPLAY_OPTIONS]\n entities = [\n EnvirophatSensor(data, description)\n for description in SENSOR_TYPES\n if description.key in display_options\n ]\n add_entities(entities, True)", "def __init__(self, mb_info, switch_config):\n self.microblaze = Arduino(mb_info, ARDUINO_MAILBOX_PROGRAM)\n self.iop_switch_config = switch_config", "def setup_platform(hass, config, add_devices, discovery_info=None):\n name = config.get(CONF_NAME)\n ip_addr = config.get(CONF_HOST)\n mac_addr = binascii.unhexlify(config.get(CONF_MAC).encode().replace(b':', b''))\n target_temp_default = config.get(CONF_TARGET_TEMP)\n target_temp_step = config.get(CONF_TARGET_TEMP_STEP)\n operation_list = DEFAULT_OPERATION_LIST\n \n import broadlink\n \n broadlink_device = broadlink.hysen((ip_addr, 80), mac_addr, None)\n broadlink_device.timeout = config.get(CONF_TIMEOUT)\n\n try:\n broadlink_device.auth()\n add_devices([\n BroadlinkHysenClimate(hass, name, broadlink_device, target_temp_default, target_temp_step, operation_list)\n ])\n except socket.timeout:\n _LOGGER.error(\"Failed to connect to Broadlink Hysen Device IP:%s\",ip_addr)", "def __init__(self,\n moduletype: str = 'pH', \n name: str = 'Atlas_pH_sensor', \n bus: int = 1, \n address: int = 99) -> None:\n # The .initialise method is called in AtlasI2C __init__ \n # to initialise the sensors.\n super().__init__(moduletype=moduletype, name=name, bus=bus, address=address)", "def setup_platform(hass, config, add_entities, discovery_info=None):\n name = config.get(CONF_NAME)\n token_path = hass.config.path(config.get(CONF_TOKEN_FILE))\n latitude = hass.config.latitude\n longitude = hass.config.longitude\n\n token_cache = load_json(token_path)\n if not token_cache or \"authentication_token\" not in token_cache:\n raise ValueError(\"Missing or bad token file.\")\n\n add_entities([VoiNearestScooterSensor(name, token_path, latitude, longitude)])", "def init(self):\n self.reset()\n\n self.__interface.send_command('POWER_SETTING')\n self.__interface.send_data(0x37)\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('PANEL_SETTING')\n self.__interface.send_data(0xCF)\n self.__interface.send_data(0x08)\n\n self.__interface.send_command('BOOSTER_SOFT_START')\n self.__interface.send_data(0xc7)\n self.__interface.send_data(0xcc)\n self.__interface.send_data(0x28)\n\n self.__interface.send_command('POWER_ON')\n self.wait_until_idle()\n\n self.__interface.send_command('PLL_CONTROL')\n 
self.__interface.send_data(0x3c)\n\n self.__interface.send_command('TEMPERATURE_CALIBRATION')\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('VCOM_AND_DATA_INTERVAL_SETTING')\n self.__interface.send_data(0x77)\n\n self.__interface.send_command('TCON_SETTING')\n self.__interface.send_data(0x22)\n\n self.__interface.send_command('TCON_RESOLUTION')\n self.__interface.send_data(0x02) #source 640\n self.__interface.send_data(0x80)\n self.__interface.send_data(0x01) #gate 384\n self.__interface.send_data(0x80)\n\n self.__interface.send_command('VCM_DC_SETTING')\n self.__interface.send_data(0x1E) #decide by LUT file\n\n self.__interface.send_command(0xe5, False) #FLASH MODE\n self.__interface.send_data(0x03)", "def setup_platform(hass, config, add_devices, discovery_info=None):\n # Only act if loaded via mysensors by discovery event.\n # Otherwise gateway is not setup.\n if discovery_info is None:\n return\n\n for gateway in mysensors.GATEWAYS.values():\n # Define the S_TYPES and V_TYPES that the platform should handle as\n # states. Map them in a dict of lists.\n pres = gateway.const.Presentation\n set_req = gateway.const.SetReq\n map_sv_types = {\n pres.S_TEMP: [set_req.V_TEMP],\n pres.S_HUM: [set_req.V_HUM],\n pres.S_BARO: [set_req.V_PRESSURE, set_req.V_FORECAST],\n pres.S_WIND: [set_req.V_WIND, set_req.V_GUST],\n pres.S_RAIN: [set_req.V_RAIN, set_req.V_RAINRATE],\n pres.S_UV: [set_req.V_UV],\n pres.S_WEIGHT: [set_req.V_WEIGHT, set_req.V_IMPEDANCE],\n pres.S_POWER: [set_req.V_WATT, set_req.V_KWH],\n pres.S_DISTANCE: [set_req.V_DISTANCE],\n pres.S_LIGHT_LEVEL: [set_req.V_LIGHT_LEVEL],\n pres.S_IR: [set_req.V_IR_RECEIVE],\n pres.S_WATER: [set_req.V_FLOW, set_req.V_VOLUME],\n pres.S_CUSTOM: [set_req.V_VAR1,\n set_req.V_VAR2,\n set_req.V_VAR3,\n set_req.V_VAR4,\n set_req.V_VAR5],\n pres.S_SCENE_CONTROLLER: [set_req.V_SCENE_ON,\n set_req.V_SCENE_OFF],\n }\n if float(gateway.protocol_version) < 1.5:\n map_sv_types.update({\n pres.S_AIR_QUALITY: [set_req.V_DUST_LEVEL],\n pres.S_DUST: [set_req.V_DUST_LEVEL],\n })\n if float(gateway.protocol_version) >= 1.5:\n map_sv_types.update({\n pres.S_COLOR_SENSOR: [set_req.V_RGB],\n pres.S_MULTIMETER: [set_req.V_VOLTAGE,\n set_req.V_CURRENT,\n set_req.V_IMPEDANCE],\n pres.S_SOUND: [set_req.V_LEVEL],\n pres.S_VIBRATION: [set_req.V_LEVEL],\n pres.S_MOISTURE: [set_req.V_LEVEL],\n pres.S_AIR_QUALITY: [set_req.V_LEVEL],\n pres.S_DUST: [set_req.V_LEVEL],\n })\n map_sv_types[pres.S_LIGHT_LEVEL].append(set_req.V_LEVEL)\n\n if float(gateway.protocol_version) >= 2.0:\n map_sv_types.update({\n pres.S_INFO: [set_req.V_TEXT],\n pres.S_GAS: [set_req.V_FLOW, set_req.V_VOLUME],\n pres.S_GPS: [set_req.V_POSITION],\n pres.S_WATER_QUALITY: [set_req.V_TEMP, set_req.V_PH,\n set_req.V_ORP, set_req.V_EC]\n })\n map_sv_types[pres.S_CUSTOM].append(set_req.V_CUSTOM)\n map_sv_types[pres.S_POWER].extend(\n [set_req.V_VAR, set_req.V_VA, set_req.V_POWER_FACTOR])\n\n devices = {}\n gateway.platform_callbacks.append(mysensors.pf_callback_factory(\n map_sv_types, devices, add_devices, MySensorsSensor))", "def async_add_insteon_binary_sensor_entities(discovery_info=None):\n async_add_insteon_entities(\n hass,\n BINARY_SENSOR_DOMAIN,\n InsteonBinarySensorEntity,\n async_add_entities,\n discovery_info,\n )", "def setup(hass: HomeAssistant, base_config: ConfigType) -> bool: # noqa: C901\n\n hass.data[DOMAIN] = {}\n\n # Parse configuration into a dict of device name to physical address\n # represented as a list of four elements.\n device_aliases = {}\n devices = 
base_config[DOMAIN].get(CONF_DEVICES, {})\n _LOGGER.debug(\"Parsing config %s\", devices)\n device_aliases.update(parse_mapping(devices))\n _LOGGER.debug(\"Parsed devices: %s\", device_aliases)\n\n platform = base_config[DOMAIN].get(CONF_PLATFORM, SWITCH)\n\n loop = (\n # Create own thread if more than 1 CPU\n hass.loop\n if multiprocessing.cpu_count() < 2\n else None\n )\n host = base_config[DOMAIN].get(CONF_HOST)\n display_name = base_config[DOMAIN].get(CONF_DISPLAY_NAME, DEFAULT_DISPLAY_NAME)\n if host:\n adapter = TcpAdapter(host, name=display_name, activate_source=False)\n else:\n adapter = CecAdapter(name=display_name[:12], activate_source=False)\n hdmi_network = HDMINetwork(adapter, loop=loop)\n\n def _adapter_watchdog(now=None):\n _LOGGER.debug(\"Reached _adapter_watchdog\")\n event.call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)\n if not adapter.initialized:\n _LOGGER.info(\"Adapter not initialized; Trying to restart\")\n hass.bus.fire(EVENT_HDMI_CEC_UNAVAILABLE)\n adapter.init()\n\n _adapter_watchdog_job = HassJob(_adapter_watchdog, cancel_on_shutdown=True)\n\n @callback\n def _async_initialized_callback(*_: Any):\n \"\"\"Add watchdog on initialization.\"\"\"\n return event.async_call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)\n\n hdmi_network.set_initialized_callback(_async_initialized_callback)\n\n def _volume(call: ServiceCall) -> None:\n \"\"\"Increase/decrease volume and mute/unmute system.\"\"\"\n mute_key_mapping = {\n ATTR_TOGGLE: KEY_MUTE_TOGGLE,\n ATTR_ON: KEY_MUTE_ON,\n ATTR_OFF: KEY_MUTE_OFF,\n }\n for cmd, att in call.data.items():\n if cmd == CMD_UP:\n _process_volume(KEY_VOLUME_UP, att)\n elif cmd == CMD_DOWN:\n _process_volume(KEY_VOLUME_DOWN, att)\n elif cmd == CMD_MUTE:\n hdmi_network.send_command(\n KeyPressCommand(mute_key_mapping[att], dst=ADDR_AUDIOSYSTEM)\n )\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n _LOGGER.info(\"Audio muted\")\n else:\n _LOGGER.warning(\"Unknown command %s\", cmd)\n\n def _process_volume(cmd, att):\n if isinstance(att, (str,)):\n att = att.strip()\n if att == CMD_PRESS:\n hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))\n elif att == CMD_RELEASE:\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n else:\n att = 1 if att == \"\" else int(att)\n for _ in range(0, att):\n hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n\n def _tx(call: ServiceCall) -> None:\n \"\"\"Send CEC command.\"\"\"\n data = call.data\n if ATTR_RAW in data:\n command = CecCommand(data[ATTR_RAW])\n else:\n src = data.get(ATTR_SRC, ADDR_UNREGISTERED)\n dst = data.get(ATTR_DST, ADDR_BROADCAST)\n if ATTR_CMD in data:\n cmd = data[ATTR_CMD]\n else:\n _LOGGER.error(\"Attribute 'cmd' is missing\")\n return\n if ATTR_ATT in data:\n if isinstance(data[ATTR_ATT], (list,)):\n att = data[ATTR_ATT]\n else:\n att = reduce(lambda x, y: f\"{x}:{y:x}\", data[ATTR_ATT])\n else:\n att = \"\"\n command = CecCommand(cmd, dst, src, att)\n hdmi_network.send_command(command)\n\n def _standby(call: ServiceCall) -> None:\n hdmi_network.standby()\n\n def _power_on(call: ServiceCall) -> None:\n hdmi_network.power_on()\n\n def _select_device(call: ServiceCall) -> None:\n \"\"\"Select the active device.\"\"\"\n if not (addr := call.data[ATTR_DEVICE]):\n _LOGGER.error(\"Device not found: %s\", call.data[ATTR_DEVICE])\n return\n if addr in device_aliases:\n addr = device_aliases[addr]\n else:\n entity = 
hass.states.get(addr)\n _LOGGER.debug(\"Selecting entity %s\", entity)\n if entity is not None:\n addr = entity.attributes[\"physical_address\"]\n _LOGGER.debug(\"Address acquired: %s\", addr)\n if addr is None:\n _LOGGER.error(\n \"Device %s has not physical address\", call.data[ATTR_DEVICE]\n )\n return\n if not isinstance(addr, (PhysicalAddress,)):\n addr = PhysicalAddress(addr)\n hdmi_network.active_source(addr)\n _LOGGER.info(\"Selected %s (%s)\", call.data[ATTR_DEVICE], addr)\n\n def _update(call: ServiceCall) -> None:\n \"\"\"Update if device update is needed.\n\n Called by service, requests CEC network to update data.\n \"\"\"\n hdmi_network.scan()\n\n def _new_device(device):\n \"\"\"Handle new devices which are detected by HDMI network.\"\"\"\n key = f\"{DOMAIN}.{device.name}\"\n hass.data[DOMAIN][key] = device\n ent_platform = base_config[DOMAIN][CONF_TYPES].get(key, platform)\n discovery.load_platform(\n hass,\n ent_platform,\n DOMAIN,\n discovered={ATTR_NEW: [key]},\n hass_config=base_config,\n )\n\n def _shutdown(call):\n hdmi_network.stop()\n\n def _start_cec(callback_event):\n \"\"\"Register services and start HDMI network to watch for devices.\"\"\"\n hass.services.register(\n DOMAIN, SERVICE_SEND_COMMAND, _tx, SERVICE_SEND_COMMAND_SCHEMA\n )\n hass.services.register(\n DOMAIN, SERVICE_VOLUME, _volume, schema=SERVICE_VOLUME_SCHEMA\n )\n hass.services.register(\n DOMAIN,\n SERVICE_UPDATE_DEVICES,\n _update,\n schema=SERVICE_UPDATE_DEVICES_SCHEMA,\n )\n hass.services.register(DOMAIN, SERVICE_POWER_ON, _power_on)\n hass.services.register(DOMAIN, SERVICE_STANDBY, _standby)\n hass.services.register(DOMAIN, SERVICE_SELECT_DEVICE, _select_device)\n\n hdmi_network.set_new_device_callback(_new_device)\n hdmi_network.start()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_cec)\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)\n return True", "def do_load_environment(self, *arg):\n print(\"Loading sensors\")\n self.environment = ArduinoSerialMonitor(auto_detect=False)\n self.do_enable_sensor('environment', delay=1)", "def setup_sensors(self, configs):\n self.__sensors = self.setup_components(configs, 'scale_client.sensors')", "def setup_platform(hass, config, add_devices, discovery_info=None):\n # Add devices\n add_devices([SemsSensor(\"SEMS Portal\", config)], True)", "def setup_platform(hass, config, add_entities, discovery_info=None):\n currency = config.get(CONF_CURRENCY)\n display_currency = config.get(CONF_DISPLAY_CURRENCY).upper()\n display_currency_decimals = config.get(CONF_DISPLAY_CURRENCY_DECIMALS)\n name = config.get(CONF_NAME)\n icon = config.get(CONF_ICON)\n\n try:\n BitStampNetData(currency, display_currency).update()\n except HTTPError:\n _LOGGER.warning(\n \"Currency %s or display currency %s \"\n \"is not available. 
Using btc \"\n \"and EUR.\",\n currency,\n display_currency,\n )\n currency = DEFAULT_CURRENCY\n display_currency = DEFAULT_DISPLAY_CURRENCY\n icon = DEFAULT_ICON\n\n add_entities(\n [\n BitStampNetSensor(\n BitStampNetData(currency, display_currency),\n name,\n currency,\n display_currency,\n display_currency_decimals,\n icon\n )\n ],\n True,\n )", "def __setup_deploy(self):\n init_dict = dict()\n\n # Fetch and store platform\n init_dict['os'] = platform.system()\n # Fetch, validate and store target ip address\n while True:\n address = input('Please enter the IP address: ')\n try:\n ipaddress.ip_address(address)\n init_dict['ip'] = address\n break\n except Exception as error:\n print('Error: ', error)\n\n # Dump data into file\n self.settings_file.touch()\n with self.settings_file.open('w') as file:\n json.dump(init_dict, file, indent=4)", "def setup(self):\n # if not system.restore_snapshot():\n # self.log.debug(\"No snapshot to restore, if this is not expected please contact automation team\")\n crindsim.set_mode(\"manual\")\n pos.connect()\n pos.sign_on()", "def _initialize(self):\r\n print(\"Set the CP mode to EVSE\")\r\n self.whitebeet.controlPilotSetMode(1)\r\n print(\"Set the CP duty cycle to 100%\")\r\n self.whitebeet.controlPilotSetDutyCycle(100)\r\n print(\"Start the CP service\")\r\n self.whitebeet.controlPilotStart()\r\n print(\"Start SLAC in EVSE mode\")\r\n self.whitebeet.slacStart(1)\r\n time.sleep(2)", "async def async_setup_platform(\n hass, config, async_add_entities, discovery_info=None):\n data = await nooa.get_data()\n entities = [\n NooaSensor(data, \"R\"),\n NooaSensor(data, \"S\"),\n NooaSensor(data, \"G\"),\n ]\n async_add_entities(entities)", "def __init__(self, robot):\n\n #initialise the stick and the smart dashboard (in case we need stuff for auton):\n self.stick = wpilib.Joystick(0)\n self.smart_dashboard = NetworkTable.getTable(\"SmartDashboard\")\n\n #Main stick buttons.\n #-----------------------------------------------------------------------\n trigger = JoystickButton(self.stick, 1)\n thumb = JoystickButton(self.stick, 2)\n three = JoystickButton(self.stick, 3)\n four = JoystickButton(self.stick, 4)\n five = JoystickButton(self.stick, 5)\n six = JoystickButton(self.stick, 6)\n seven = JoystickButton(self.stick, 7)\n eight = JoystickButton(self.stick, 8)\n nine = JoystickButton(self.stick, 9)\n ten = JoystickButton(self.stick, 10)\n eleven = JoystickButton(self.stick, 11)\n twelve = JoystickButton(self.stick, 12)\n\n #Hat switch POV stuff.\n #-----------------------------------------------------------------------\n pov_north = POVButton(self.stick, 0)\n pov_northeast = POVButton(self.stick, 45)\n pov_east = POVButton(self.stick, 90)\n pov_southeast = POVButton(self.stick, 135)\n pov_south = POVButton(self.stick, 180)\n pov_southwest = POVButton(self.stick, 225)\n pov_west = POVButton(self.stick, 270)\n pov_northwest = POVButton(self.stick, 315)\n\n pov_south.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kBack))\n pov_north.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kForward))\n pov_east.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kRight))\n pov_west.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kLeft))", "def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. 
This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()", "def one_time_setup(node, rhbuild, branch: str) -> None:\n node.exec_command(\n cmd=f\"sudo rm -rf ceph && git clone --branch {branch} --single-branch --depth 1 {TEST_REPO}\"\n )\n os_ver = rhbuild.split(\"-\")[-1]\n ceph_ver = rhbuild.split(\"-\")[0]\n\n if os_ver == \"7\":\n node.exec_command(\n cmd=\"sed -i '49 a rbd feature disable testimg1 object-map fast-diff deep-flatten' \"\n \"ceph/qa/workunits/rbd/kernel.sh\"\n )\n\n if \"4.\" in ceph_ver:\n node.exec_command(\n cmd=\"sed -i 's/blocklist/blacklist/g' \"\n \"ceph/qa/workunits/rbd/krbd_exclusive_option.sh\"\n )\n\n try:\n node.exec_command(cmd=\"rpm -qa | grep xmlstarlet\")\n return\n except BaseException: # noqa\n pass\n\n EPEL_RPM = (\n f\"https://dl.fedoraproject.org/pub/epel/epel-release-latest-{os_ver}.noarch.rpm\"\n )\n\n commands = [\n {\"cmd\": f\"yum install -y {EPEL_RPM} --nogpgcheck\", \"sudo\": True},\n {\n \"cmd\": \"yum install -y xmlstarlet rbd-nbd qemu-img cryptsetup --nogpgcheck\",\n \"sudo\": True,\n },\n ]\n for command in commands:\n node.exec_command(**command)\n\n # Blind sleep to ensure the Mon service has restarted.\n # TODO: Identify a way to check the service is running\n sleep(5)", "def setup_platform(hass, config, add_entities, discovery_info=None):\n\n currency = config[CONF_CURRENCY]\n\n if currency not in exchangerates.get_ticker():\n _LOGGER.warning(\"Currency %s is not available. Using USD\", currency)\n currency = DEFAULT_CURRENCY\n\n data = BitcoinData()\n dev = []\n for variable in config[CONF_DISPLAY_OPTIONS]:\n dev.append(BitcoinSensor(data, variable, currency))\n\n add_entities(dev, True)", "def setPlatform(self):\n\t\treturn None", "def setup_platform(hass, config, add_entities, discovery_info=None):\n\n _LOGGER.debug('Initializing Gazpar platform...')\n\n try:\n username = config[CONF_USERNAME]\n password = config[CONF_PASSWORD]\n cost = config[CONF_COST]\n\n account = GazparAccount(hass, username, password, cost)\n add_entities(account.sensors, True)\n\n _LOGGER.debug('Gazpar platform initialization has completed successfully')\n except BaseException:\n _LOGGER.error('Gazpar platform initialization has failed with exception : {0}'.format(traceback.format_exc()))", "def __init__(self, parent, endpoint):\n Wemo_Endpoint.__init__(self, parent, endpoint)\n self.device_type = self._Parent._DeviceTypes.get('wemo_switch')\n self.FEATURES.update({\n FEATURE_BRIGHTNESS: False,\n FEATURE_PERCENT: False,\n FEATURE_NUMBER_OF_STEPS: False\n })", "def setup_platform(hass, config, add_entities, discovery_info=None):\n name = config.get(CONF_NAME)\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n xfinity_data = XfinityUsageData(username, password)\n sensor = XfinityUsageSensor(name, xfinity_data)\n\n def _first_run():\n sensor.update()\n add_entities([sensor])\n\n # Wait until start event is sent to load this component.\n hass.bus.listen_once(EVENT_HOMEASSISTANT_START, lambda _: _first_run())", "def test_setup(self):\n engine = Engine(self.config_file, self.api_token)\n engine.setup()", "def setup_platform(opp, config, add_entities, discovery_info=None):\n\n for scene in pywink.get_scenes():\n _id = scene.object_id() + scene.name()\n if _id not in opp.data[DOMAIN][\"unique_ids\"]:\n add_entities([WinkScene(scene, opp)])", "def setUpClass(cls):\n 
super(CentralCharmOperationTest, cls).setUpClass()\n cls.services = [\n 'ovn-northd',\n 'ovsdb-server',\n ]\n source = zaza.model.get_application_config(\n cls.application_name)['source']['value']\n logging.info(source)\n if 'train' in source:\n cls.nrpe_checks = [\n 'ovn-northd',\n 'ovn-nb-ovsdb',\n 'ovn-sb-ovsdb',\n ]\n else:\n # Ussuri or later (distro or cloudarchive)\n cls.nrpe_checks = [\n 'ovn-northd',\n 'ovn-ovsdb-server-sb',\n 'ovn-ovsdb-server-nb',\n ]", "def setup(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.Motor_A_EN, GPIO.OUT)\n GPIO.setup(self.Motor_B_EN, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin2, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin2, GPIO.OUT)\n self.motorStop() # Avoids automatic motor rotation after initialization\n try: # Try is used here to avoid errors due to repeated setting of PWM\n self.pwm_A = GPIO.PWM(self.Motor_A_EN, 1000)\n self.pwm_B = GPIO.PWM(self.Motor_B_EN, 1000)\n except:\n pass", "async def async_setup_platform(\n hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None\n) -> None:\n if discovery_info is None:\n return\n\n broker = hass.data[DOMAIN][\"broker\"]\n\n async_add_entities(\n [\n GeniusSwitch(broker, z)\n for z in broker.client.zone_objs\n if z.data[\"type\"] == GH_ON_OFF_ZONE\n ]\n )", "def boot(self):\n\n pass", "def setUp(self):\n self.manager, self.proxy = tests.utils.setup_xmlrpc()\n self.proxy.provider.register(\n PROVIDER_ID, USERNAME, PASSWORD, URL, TENANT, PROVIDER_TYPE,\n DEFAULT_IMAGE, DEFAULT_FLAVOR\n )\n status = self.proxy.server.create(\n PROVIDER_ID, IMAGE, FLAVOR\n )\n self.check_xmlrpc_command_result(status)\n status = self.proxy.server.list(PROVIDER_ID)\n info = self.check_xmlrpc_simple(status, {})\n self.machine_uuid = info['uuid']", "def __init__(self, parent, endpoint):\n Wemo_Endpoint.__init__(self, parent, endpoint)\n self.device_type = self._Parent._DeviceTypes.get('wemo_light')\n self.FEATURES.update({\n FEATURE_BRIGHTNESS: True,\n FEATURE_PERCENT: True,\n FEATURE_NUMBER_OF_STEPS: 100\n })", "def setUp(self):\n # Direct connection used to match the property values\n self.sockobj = socket(AF_INET, SOCK_STREAM)\n self.sockobj.settimeout(socket_timeout)\n # Connect to the selected server\n self.sockobj.connect(server) \n self.pyclient = PySimpleClient()\n self.cmd_num = 0\n for servo_type in app_nr.values():\n self.__dict__[servo_type] = self.pyclient.getComponent(\"MINORSERVO/\" + servo_type)", "async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n if DOMAIN not in hass.data:\n hass.data[DOMAIN] = HueSensorData(hass)\n\n await hass.data[DOMAIN].async_add_platform_entities(\n HueBinarySensor,\n BINARY_SENSOR_MODELS,\n async_add_entities,\n config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL),\n )", "def setup_platform(hass, config, add_entities, discovery_info=None):\n add_entities([\n EzvizCamera(hass, config)\n ])\n return True", "def setup_platform(hass, config, add_entities, discovery_info=None):\n hass.data.setdefault(DOMAIN, {})\n\n def service_set_override(call):\n \"\"\"Handle the service call.\"\"\"\n entity_id = call.data.get(ATTR_ENTITY_ID)\n temperature = call.data.get(ATTR_TEMPERATURE)\n until = call.data.get(\n ATTR_UNTIL, (datetime.now() + timedelta(hours=1)).strftime(\"%H:%M\")\n )\n target_devices = [\n dev for dev in hass.data[DOMAIN][\"entities\"] if dev.entity_id in entity_id\n ]\n target_device: WarmupThermostat\n for 
target_device in target_devices:\n target_device.set_override(temperature, until)\n target_device.schedule_update_ha_state(True)\n\n _LOGGER.info(\"Setting up platform for Warmup component\")\n user = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n warmup = Warmup4IE(user, password)\n\n if warmup is None or not warmup.setup_finished:\n raise PlatformNotReady\n warmup_client = WarmupClient(warmup)\n to_add = []\n for device in warmup.get_all_devices().values():\n to_add.append(WarmupThermostat(hass, device, warmup_client))\n add_entities(to_add)\n hass.data[DOMAIN][\"entities\"] = to_add\n hass.services.register(DOMAIN, \"set_override\", service_set_override)\n return True", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n lights = []\n for channel, device_config in config[CONF_DEVICES].items():\n device = {}\n device[\"name\"] = device_config[CONF_NAME]\n device[\"dimmable\"] = device_config[\"dimmable\"]\n device[\"channel\"] = channel\n device[\"driver\"] = config[CONF_DRIVER]\n device[\"host\"] = config[CONF_HOST]\n device[\"port\"] = config[CONF_PORT]\n lights.append(FutureNowLight(device))\n\n add_entities(lights, True)", "async def init(self):\n logger.info(\"Init device: %s\", self._serial)\n self._callback(STATUS_INIT)\n\n self._init_binaries()\n self._init_apks()\n await self._init_forwards()\n\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --stop\")\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --nouia -d\")", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n _type: str = config[CONF_TYPE]\n name: str = config[CONF_NAME]\n\n if hass.config.latitude < 0:\n hemisphere = SOUTHERN\n elif hass.config.latitude > 0:\n hemisphere = NORTHERN\n else:\n hemisphere = EQUATOR\n\n _LOGGER.debug(_type)\n add_entities([Season(hemisphere, _type, name)], True)", "def setup_platform(hass, config, add_entities, discovery_info=None):\n import jsonpath\n jsonpath = jsonpath.jsonpath\n global HEAT_PUMPS\n hub.update_overview()\n if int(hub.config.get(CONF_CLIMATE, 1)):\n HEAT_PUMPS = hub.get('$.heatPumps')\n if HEAT_PUMPS:\n for heat_pump in HEAT_PUMPS[0]:\n device_label = jsonpath(heat_pump, '$.deviceLabel')[0]\n add_entities([\n VerisureHeatPump(device_label)\n ])", "def _setup_sensor ( self ):\n self.spectral = Spectral ( np.array([450, 520, 630, 770., 1550, 2090.] ),\n np.array([ 520, 600, 690, 900., 1750., 2350.] 
) )", "def setup_platform(hass, config, add_entities, discovery_info=None):\n devices = []\n dev = discovery_info.get(\"dev\")\n param = discovery_info.get(\"param\")\n devices = []\n for idx in dev['data']:\n if dev['devtype'] in OT_SENSOR_TYPES and idx in [\"Z\",\"V\",\"P3\",\"P4\"]:\n devices.append(LifeSmartSensor(dev,idx,dev['data'][idx],param))\n else:\n devices.append(LifeSmartSensor(dev,idx,dev['data'][idx],param))\n add_entities(devices)", "def _create_binary_sensor(\n hass: HomeAssistant, knx_module: XKNX, config: ConfigType\n) -> XknxBinarySensor:\n device_name = config[CONF_NAME]\n actions = []\n automations = config.get(BinarySensorSchema.CONF_AUTOMATION)\n if automations is not None:\n for automation in automations:\n counter = automation[BinarySensorSchema.CONF_COUNTER]\n hook = automation[BinarySensorSchema.CONF_HOOK]\n action = automation[BinarySensorSchema.CONF_ACTION]\n script_name = f\"{device_name} turn ON script\"\n script = Script(hass, action, script_name, DOMAIN)\n action = XknxActionCallback(\n knx_module, script.async_run, hook=hook, counter=counter\n )\n actions.append(action)\n\n return XknxBinarySensor(\n knx_module,\n name=device_name,\n group_address_state=config[BinarySensorSchema.CONF_STATE_ADDRESS],\n sync_state=config[BinarySensorSchema.CONF_SYNC_STATE],\n device_class=config.get(CONF_DEVICE_CLASS),\n ignore_internal_state=config[BinarySensorSchema.CONF_IGNORE_INTERNAL_STATE],\n reset_after=config.get(BinarySensorSchema.CONF_RESET_AFTER),\n actions=actions,\n )", "def __init__(self):\n super(UpnpEmbeddedDevice, self).__init__()\n return" ]
[ "0.6923445", "0.6406349", "0.6367884", "0.630438", "0.6297725", "0.62513864", "0.61724937", "0.61561346", "0.61022043", "0.60586834", "0.60506696", "0.60256183", "0.59961325", "0.59727365", "0.59395474", "0.5925092", "0.5920433", "0.5904643", "0.58785045", "0.5876473", "0.58727616", "0.58726966", "0.58704424", "0.58220845", "0.5789167", "0.5765442", "0.5754448", "0.5746082", "0.57348514", "0.571501", "0.5709718", "0.5708997", "0.5704853", "0.5703996", "0.5698553", "0.5689435", "0.56888753", "0.5678156", "0.56557137", "0.5652327", "0.56336546", "0.56336546", "0.5629089", "0.5628019", "0.561134", "0.5610526", "0.56036854", "0.55956596", "0.5590719", "0.558624", "0.5583422", "0.5571518", "0.55670226", "0.55624485", "0.55567646", "0.55561256", "0.5552413", "0.55513716", "0.5546617", "0.55454385", "0.5544349", "0.554177", "0.5541614", "0.5538822", "0.55320084", "0.55306226", "0.55255485", "0.55212533", "0.5521192", "0.55207235", "0.5517259", "0.55142826", "0.5513926", "0.5511641", "0.5509405", "0.5508798", "0.5503813", "0.5494628", "0.54935926", "0.5487446", "0.5486982", "0.546875", "0.54636836", "0.5462327", "0.54614526", "0.54372835", "0.54251546", "0.54224783", "0.54154557", "0.5415305", "0.5415282", "0.54145104", "0.5410341", "0.54093885", "0.54081726", "0.53986275", "0.5395311", "0.5386511", "0.537682", "0.53728324" ]
0.7185503
0
Initialize the EnOcean binary sensor.
def __init__(self, dev_id, devname): enocean.EnOceanDevice.__init__(self) self.stype = "listener" self.dev_id = dev_id self.which = -1 self.onoff = -1 self.devname = devname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self._read_calibration_data()\n self.configure_sensor(\n TemperatureOversamplings.x08,\n PressureOversamplings.x16,\n HumidityOversamplings.x08,\n IIRFilterCoefficients.FC_003,\n 250,\n 250)", "def _initialize(self):\n self.flush()\n print(\"Initializing sensor...\")\n try:\n self.get_sample()\n print('Initialization successful')\n except:\n print('Initialization failed. Please disconnect and reconnect sensor.')", "def __init__(self):\n self._read_calibration_data()\n self.set_oversamplings_and_mode(\n HumidityOversampling.x08,\n TemperatureOversampling.x08,\n PressureOversampling.x16,\n SensorMode.Normal)\n self.set_config(\n InactiveDuration.ms1000,\n FilterCoefficient.fc04)", "def __init__(self):\n self.inches_moved = 0\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.running = True\n self.ir_sensor = ev3.InfraredSensor()\n self.color_sensor = ev3.ColorSensor()\n assert self.color_sensor\n assert self.ir_sensor\n assert self.touch_sensor\n self.arm_motor.position = 0\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.pixy\n\n self.right_motor_encoder = self.right_motor.position\n self.left_motor_encoder = self.left_motor.position", "def setUp(self):\n self.ser = Serial()\n self.device_obj = ZBSensor(self.ser)", "def setup_platform(hass, config, add_devices, discovery_info=None):\n dev_id = config.get(CONF_ID, None)\n devname = config.get(CONF_NAME, \"EnOcean binary sensor\")\n add_devices([EnOceanBinarySensor(dev_id, devname)])", "def setUp(self):\n self.sensor = Sensor('127.1.1.3', 9000)", "def __init__(self, pin =0):\n\t\tself.uv_sensor = pin\n\t\tgrovepi.pinMode(self.uv_sensor, \"INPUT\")", "def __init__(self):\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.color_sensor = ev3.ColorSensor()\n self.ir_sensor = ev3.InfraredSensor()\n self.MAX_SPEED = 900\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.left_motor.connected\n assert self.right_motor.connected\n assert self.arm_motor.connected\n assert self.touch_sensor\n assert self.color_sensor\n assert self.ir_sensor\n assert self.pixy", "def __init__(self):\n\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.color_sensor = ev3.ColorSensor()\n self.ir_sensor = ev3.InfraredSensor()\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n\n assert self.left_motor.connected\n assert self.right_motor.connected\n assert self.arm_motor.connected\n assert self.touch_sensor\n assert self.color_sensor\n assert self.ir_sensor\n assert self.pixy", "def __init__(self) -> None:\n self.sensor = serial.Serial(config.DEVICE)\n super().__init__()", "def __init__(self):\r\n # Check device ID.\r\n chip_id = self._read_byte(_BME280_REGISTER_CHIPID)\r\n if _BME280_CHIPID != chip_id:\r\n raise RuntimeError('Failed to find BME280! Chip ID 0x%x' % chip_id)\r\n self._write_register_byte(_BME280_REGISTER_SOFTRESET, 0xB6)\r\n time.sleep(0.5)\r\n self._read_coefficients()\r\n self.sea_level_pressure = 1013.25\r\n \"\"\"Pressure in hectoPascals at sea level. 
Used to calibrate `altitude`.\"\"\"\r\n # turn on humidity oversample 16x\r\n self._write_register_byte(_BME280_REGISTER_CTRL_HUM, 0x03)\r\n self._t_fine = None", "def __init__(self):\n self.read_input()\n self.update_binaries()", "def __init__(self, parent, endpoint):\n Wemo_Endpoint.__init__(self, parent, endpoint)\n self.device_type = self._Parent._DeviceTypes.get('wemo_binary_sensor')\n self.FEATURES.update({\n FEATURE_BRIGHTNESS: False,\n FEATURE_PERCENT: False,\n FEATURE_NUMBER_OF_STEPS: False\n })", "def initialize_electronics(self):\n\n self.electronics = ArduinoModel(**self.config['electronics']['arduino'])\n self.logger.info('Initializing electronics arduino')\n self.electronics.initialize()", "def _initialize(self):\n self.send_init_command()", "def init_edra(self) -> None:\n ...", "async def init(self):\n logger.info(\"Init device: %s\", self._serial)\n self._callback(STATUS_INIT)\n\n self._init_binaries()\n self._init_apks()\n await self._init_forwards()\n\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --stop\")\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --nouia -d\")", "def initialize(self):\n self.lib.Initialize()\n\n self.triggers = {'Internal': 0, 'External': 1, 'External Start': 6,\n 'External Exposure': 7, 'External FVB EM': 9,\n 'Software Trigger': 10,\n 'External Charge Shifting': 12}\n self.savetypes = {'Signed16bits': 1, 'Signed32bits': 2, 'Float': 3}\n\n # Initial values\n\n self.readout_packing_state = False\n self.readout_packing = self.readout_packing_state\n\n self.readout_mode_mode = 'Image'\n self.readout_mode = self.readout_mode_mode\n\n self.photon_counting_mode_state = False\n self.photon_counting_mode = self.photon_counting_mode_state\n\n self.frame_transfer_mode_state = False\n self.frame_transfer_mode = self.frame_transfer_mode_state\n\n self.fan_mode_index = 'onfull'\n self.fan_mode = self.fan_mode_index\n\n self.EM_gain_mode_index = 'RealGain'\n self.EM_gain_mode = self.EM_gain_mode_index\n\n self.cooled_on_shutdown_value = False\n self.cooled_on_shutdown = self.cooled_on_shutdown_value\n\n self.baseline_offset_value = 100\n self.baseline_offset = self.baseline_offset_value\n\n self.adv_trigger_mode_state = True\n self.adv_trigger_mode = self.adv_trigger_mode_state\n\n self.acq_mode = 'Single Scan'\n self.acquisition_mode = self.acq_mode\n\n self.amp_typ = 0\n\n self.horiz_shift_speed_index = 0\n self.horiz_shift_speed = self.horiz_shift_speed_index\n\n self.vert_shift_speed_index = 0\n self.vert_shift_speed = self.vert_shift_speed_index\n\n self.preamp_index = 0\n self.preamp = self.preamp_index\n\n self.temperature_sp = 0 * degC\n self.temperature_setpoint = self.temperature_sp\n\n self.auxout = np.zeros(4, dtype=bool)\n for i in np.arange(1, 5):\n self.out_aux_port[i] = False\n\n self.trigger_mode_index = 'Internal'\n self.trigger_mode = self.trigger_mode_index", "def __init__(self):\n self.server_name = 'Binary Light Device'\n self.device = None", "def _initialize(self):\r\n print(\"Set the CP mode to EVSE\")\r\n self.whitebeet.controlPilotSetMode(1)\r\n print(\"Set the CP duty cycle to 100%\")\r\n self.whitebeet.controlPilotSetDutyCycle(100)\r\n print(\"Start the CP service\")\r\n self.whitebeet.controlPilotStart()\r\n print(\"Start SLAC in EVSE mode\")\r\n self.whitebeet.slacStart(1)\r\n time.sleep(2)", "def autonomousInit(self):\n fieldState = self.driverStation.getGameSpecificMessage()\n self.fieldState = fieldState\n self.smartDashboard.putString(\"field state\", fieldState)\n fieldPosition = 
self.smartDashboard.getString(\"field position\", \"\")\n self.startingFieldPosition = self.parserobotFieldPosition(fieldPosition)\n self.smartDashboard.putNumber(\"position\", self.startingFieldPosition)\n \n #convert field states to our enum values \n self.ourSwitchSide = self.parserobotFieldPosition(self.fieldState[0])\n self.scaleSide = self.parserobotFieldPosition(self.fieldState[1])\n self.theirSwitchSide = self.parserobotFieldPosition(self.fieldState[2])\n if self.startingFieldPosition==self.kNothing:\n print(\"No field position set. Aborting\")\n return \n \n \n #self.Encoder.setMaxPeriod(.1)\n #self.Encoder.setMinRate(10)\n #self.Encoder.setDistancePerPulse(5)\n #self.Encoder.setReverseDirection(True)\n #self.Encoder.getDistance()\n \n \"\"\"self.Encoder.reset()\n while (self.Encoder.get() < value):\n drive\n delay\"\"\"\n \n \n \n \n \n \n \n #self.Encoder.getRawAxis()\n \n \n #todo change RRR to from fms, maybe parse it first\n \n self.autonomousProgram = commands.autonomousCommand.AutonomousProgram(self.startingFieldPosition)\n self.autonomousProgram.start()", "def __init__(self, db_info_obj):\n EODataDownSensor.__init__(self, db_info_obj)\n self.sensor_name = \"Sentinel1ASF\"\n self.db_tab_name = \"EDDSentinel1ASF\"\n self.base_api_url = \"https://api.daac.asf.alaska.edu/services/search/param\"\n\n self.use_roi = False\n self.intersect_vec_file = ''\n self.intersect_vec_lyr = ''\n self.subset_vec_file = ''\n self.subset_vec_lyr = ''\n self.mask_outputs = False\n self.mask_vec_file = ''\n self.mask_vec_lyr = ''\n self.std_vis_img_stch = None", "def init(self):\n self.reset()\n\n self.__interface.send_command('POWER_SETTING')\n self.__interface.send_data(0x37)\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('PANEL_SETTING')\n self.__interface.send_data(0xCF)\n self.__interface.send_data(0x08)\n\n self.__interface.send_command('BOOSTER_SOFT_START')\n self.__interface.send_data(0xc7)\n self.__interface.send_data(0xcc)\n self.__interface.send_data(0x28)\n\n self.__interface.send_command('POWER_ON')\n self.wait_until_idle()\n\n self.__interface.send_command('PLL_CONTROL')\n self.__interface.send_data(0x3c)\n\n self.__interface.send_command('TEMPERATURE_CALIBRATION')\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('VCOM_AND_DATA_INTERVAL_SETTING')\n self.__interface.send_data(0x77)\n\n self.__interface.send_command('TCON_SETTING')\n self.__interface.send_data(0x22)\n\n self.__interface.send_command('TCON_RESOLUTION')\n self.__interface.send_data(0x02) #source 640\n self.__interface.send_data(0x80)\n self.__interface.send_data(0x01) #gate 384\n self.__interface.send_data(0x80)\n\n self.__interface.send_command('VCM_DC_SETTING')\n self.__interface.send_data(0x1E) #decide by LUT file\n\n self.__interface.send_command(0xe5, False) #FLASH MODE\n self.__interface.send_data(0x03)", "def initialize(self,*args,**kwargs):\n self.__instrumentID = c_uint32(0) \n self.__numInstruments = c_uint32()\n self.__nbrOfChannels = c_uint32()\n self.__nbrADCBits = c_uint32()\n self.__temperature = c_int32()\n self.__time_us = c_double()\n\n self.loadDLLs(**kwargs) # Load the different DLLs or DLL based modules\n self.reinit() # init or reinit the board\n self.createDictAndGlobals() # create dictionaries and global variables\n self.nbrOfChannels=int(self.__nbrOfChannels.value) # duplicate self.nbrOfChannels in a Python type variable \n self.getInitialConfig()", "def __init__(self):\n self.charm_config = hookenv.config()\n self.kv = unitdata.kv()\n if not 
self.synapse_signing_key_file:\n self.synapse_signing_key_file = \"{}/{}.signing.key\".format(\n self.synapse_conf_dir, self.get_server_name()\n )", "def __init__(self):\n super(EventDataStream, self).__init__()\n self.file_entropy = None\n self.md5_hash = None\n self.path_spec = None\n self.sha1_hash = None\n self.sha256_hash = None\n self.yara_match = None", "def __init__(self, config, sensor=None):\n if sensor is None:\n from monitor.sensor import SensorDriver\n self.sensor = SensorDriver(config.getint(CONFIG_SECTION, \"trigger_pin\"),\n config.getint(CONFIG_SECTION, \"echo_pin\"))\n else:\n self.sensor = sensor\n self.num_samples = config.getint(CONFIG_SECTION, \"num_samples\")\n self.drop_extremes = config.getboolean(CONFIG_SECTION, \"drop_extremes\")\n self.sample_delay = config.getfloat(CONFIG_SECTION, \"sample_delay\")\n self.is_running = False\n self.dist_to_bottom = config.getfloat(CONFIG_SECTION, \"distance_to_bottom\")", "def __init__(self):\n\n super().__init__()\n\n self.active = True\n self.driver = Driver.instance()\n self.sensor_manager = SensorManager.instance()\n\n self.pwm = Adafruit_PCA9685.PCA9685(address=0x40, busnum=1) # create PCA9685-object at I2C-port\n self.pwm.set_pwm_freq(50)\n\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(20, GPIO.OUT)\n GPIO.setup(21, GPIO.OUT)\n GPIO.setup(26, GPIO.OUT)\n self.driven_distance = 0", "def initialize(self) -> None:\n # Set motors to stop, read encoder values for starting point\n self.drive.arcadeDrive(0, 0)\n self.drive.resetEncoders()", "def init():\n rino.initialize.initialize()", "def init(self):\n self.AOMBoxConnection = pyArdDAC.ARD_DAC(HOST=self.AOMBox_IP, PORT=8888, DEBUG=False)#connects to arduino in High frequency Na AOM box\n #channel number should be defined in subclass\n self.INTEGER_MIN = 0\n self.INTEGER_MAX = 65535\n self.VOLTAGE_MIN = 0.0\n self.VOLTAGE_MAX = 5.0\n self.initialised=True\n return \"%s init successful\" % self.hardwareActionName", "def __init__(self,\n moduletype: str = 'pH', \n name: str = 'Atlas_pH_sensor', \n bus: int = 1, \n address: int = 99) -> None:\n # The .initialise method is called in AtlasI2C __init__ \n # to initialise the sensors.\n super().__init__(moduletype=moduletype, name=name, bus=bus, address=address)", "def initialize(self):\n watch_tv = self.args['watch_tv']\n cleaning = self.args['cleaning']\n self.sensor_living = self.get_app('globals').sensor_living # type: Sensor\n self.sensor_bedroom = self.get_app('globals').sensor_bedroom # type: Sensor\n self.sensor_spare = self.get_app('globals').sensor_spare # type: Sensor\n self.listen_state(self.watching_tv, watch_tv, new=\"on\")\n self.listen_state(self.stop_watching, watch_tv, new=\"off\")\n self.listen_state(self.clean_on, cleaning, new='on')\n self.listen_state(self.clean_off, cleaning, new='off')", "def __init__(self):\n self.data0 = [] # This will hold data from ADC0\n self.data1 = [] # This will hold data from ADC1\n self.dev = _configure_device()", "def __init__(self, input_pin, output_pin):\n self.input_pin = input_pin\n self.output_pin = output_pin\n self.power_value = 0\n self.shortage_value = 0\n self.json_message = self.get_json()", "def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. 
This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()", "def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()", "def __init__(self, input_pin: int) -> None:\n # Instansieer de afstand sensor pin.\n self._afstandsensor_input_pin = input_pin\n GPIO.setup(self._afstandsensor_input_pin, GPIO.IN,\n pull_up_down=GPIO.PUD_UP)\n\n # Zet fake opgepakt op false.\n self._fake_opgepakt = False", "def _InitializeBase(self):\n imuen = rospy.get_param(\"~imuenable\", \"True\")\n if imuen:\n message = 'Startimu\\r'\n else:\n message = 'Startnoimu\\r'\n\n rospy.loginfo(\"Initializing Base \" + message)\n self._WriteSerial(message)\n \n lincorrection = rospy.get_param(\"~linear_correction\", 1.0)\n angcorrection = rospy.get_param(\"~angular_correction\", 0.984)\n message = 'ascale %d %d\\r' % self._GetBaseAndExponent(angcorrection)\n rospy.loginfo(\"Sending correction value: \" + message)\n self._WriteSerial(message)\n message = 'lscale %d %d\\r' % self._GetBaseAndExponent(lincorrection)\n rospy.loginfo(\"Sending correction value: \" + message)\n self._WriteSerial(message)", "def __init__(self):\n \n # Initialize logger\n self._log = logging.getLogger(\"OemGateway\")\n \n # Initialize variables\n self._data_buffer = []\n self._settings = {}", "def init(self, exposure=None, frame_rate=None, trig_mode=None,\n config_file=\"\", aoi_rect=None, binning=None):\n self.connect(config_file) # use config file if defined else \"\"\n if aoi_rect is not None:\n self.aoi_rect = aoi_rect\n if exposure is not None:\n self.exposure = exposure\n if frame_rate is not None:\n self.frame_rate = frame_rate\n if trig_mode is not None:\n self.trig_mode = trig_mode\n if binning is not None:\n self.binning = binning\n if cygnet4k.debug:\n print('binning: %dx%d' % tuple([self.binning]*2))\n print('AOI rect: %s' % (self.aoi_rect,))\n print('ROI rect: %s' % (self.aoi_rect,))\n print('exposure: %g ms' % (self.exposure,))\n print('int. 
clock: %g Hz' % (self.frame_rate,))\n print('trig_mode: %s' % bin(self.trig_mode,))", "def __init__(self, address=0x76):\n self.address = address\n self.bus = self._initialize_bus()\n\n self.chip_id, self.chip_version = self._get_info_about_sensor()", "def __init_euca(self):\n if self.euca:\n return\n self.euca = Euca2ool()", "def GPIO_initialization():\n GPIO.setmode(GPIO.BCM)\n\n GPIO.setup(Sensor.IN_1, GPIO.OUT)\n GPIO.setup(Sensor.IN_2, GPIO.OUT)\n GPIO.setup(Sensor.EN, GPIO.OUT)\n\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['RED_STOP'], GPIO.IN)\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['YELLOW_CW'], GPIO.IN)\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['GREEN_CCW'], GPIO.IN)\n\n GPIO.output(Sensor.IN_1, GPIO.LOW)\n GPIO.output(Sensor.IN_2, GPIO.LOW)", "def __init__(self, sensor):\n self.sensor = sensor\n self.temperature = None\n self.humitidy = None\n self.update()", "def __init__(self, reset=True):\n self.__helper = _ABEHelpers()\n\n self.__bus = self.__helper.get_smbus()\n self.__bus.write_byte_data(\n self.__ioaddress, self.IOCON, self.__ioconfig)\n self.__port_a_value = self.__bus.read_byte_data(\n self.__ioaddress, self.GPIOA)\n self.__port_b_value = self.__bus.read_byte_data(\n self.__ioaddress, self.GPIOB)\n if reset is True:\n self.__bus.write_byte_data(self.__ioaddress, self.IODIRA, 0xFF)\n self.__bus.write_byte_data(self.__ioaddress, self.IODIRB, 0xFF)\n self.set_port_pullups(0, 0x00)\n self.set_port_pullups(1, 0x00)\n self.invert_port(0, 0x00)\n self.invert_port(1, 0x00)\n\n return", "def initialize(self):\n self.ha_url = self.args.get(\"ha_url\", None)\n self.use_current_brightness = self.args.get(\"use_current_brightness\", False)\n self.condition = self.args.get(\"condition\")\n self.lights = self.args[\"lights\"]\n self.listen_state(self.change_lights_color, self.args[\"media_player\"], attribute = self.args.get(\"photo_attribute\", \"entity_picture\"))", "def __init__(self, hass, config):\n super().__init__()\n self._parent = hass\n self.ezvizService = hass.data[PY_EZVIZ_GATEWAY]\n\n self.deviceSerial = self.ezvizService.deviceSerial\n\n self._interval_snapshots = 30\n self._last_image = \"\"\n self._last_snapshot_time = 0", "def do_load_weather(self, *arg):\n try:\n port = self.config['weather']['aag_cloud']['serial_port']\n except KeyError:\n port = '/dev/ttyUSB0'\n\n print(\"Loading AAG Cloud Sensor on {}\".format(port))\n self.weather = AAGCloudSensor(serial_address=port, use_mongo=True)\n self.do_enable_sensor('weather')", "def __init__(self):\n\n self.wp = wp\n self.wp.wiringPiSetup()\n\n self.LEDON_PIN = 21\n self.SENSOR_PINS = [22, 26, 23, 27, 24, 28, 25, 29]\n self.NUM_SENSORS = len(self.SENSOR_PINS)\n self.CHARGE_TIME = 10 #us to charge the capacitors\n self.READING_TIMEOUT = 1000 #us, assume reading is black\n\n self.sensorValues = []\n self.calibratedMax = []\n self.calibratedMin = []\n self.lastValue = 0\n self.init_pins()", "def _setup_sensor ( self ):\n self.spectral = Spectral ( np.array([500, 610, 780, 1580.] ),\n np.array([590, 680, 890, 1750.] 
) )", "def __init__(self, envirophat, use_leds):\n self.envirophat = envirophat\n self.use_leds = use_leds\n # sensors readings\n self.light = None\n self.light_red = None\n self.light_green = None\n self.light_blue = None\n self.accelerometer_x = None\n self.accelerometer_y = None\n self.accelerometer_z = None\n self.magnetometer_x = None\n self.magnetometer_y = None\n self.magnetometer_z = None\n self.temperature = None\n self.pressure = None\n self.voltage_0 = None\n self.voltage_1 = None\n self.voltage_2 = None\n self.voltage_3 = None", "async def _hw_init(self):\n await self._write_async(b\":XR\\r\") # Broadcast: initialize + execute\n # Note: no need to consume reply here because there is none (since we are using broadcast)", "def __init__(self):\n self.hw = dev_hwinfo.device()\n self.ethKey=\"Ethernet\"\n self.ethAllInterfaceName=[]\n dir_path = os.path.dirname(os.path.realpath(__file__))\n self.myDefine = init_define.main()\n self.mPlatform=self.hw.getPlatform()", "def _setup_sensor ( self ):\n self.spectral = Spectral ( np.array([450, 520, 630, 770., 1550, 2090.] ),\n np.array([ 520, 600, 690, 900., 1750., 2350.] ) )", "def __init__(self, tools, fdt, output, bundle):\n self._tools = tools\n self._fdt = fdt\n self._out = output\n self._bundle = bundle\n self.text_base = self._fdt.GetInt('/chromeos-config', 'textbase', -1)\n\n # For speed, use the 'update' algorithm and don't verify\n self.update = True\n self.verify = False\n\n # Use default servo port\n self._servo_port = 0", "def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()", "def __init__(self, node, mac, sensor_id):\n super().__init__(node, mac)\n self.sensor_id = sensor_id\n self.sensor_type = SENSORS[sensor_id]\n self.node_callbacks = (AVAILABLE_SENSOR_ID, sensor_id)", "def __init__(self):\n self.sensor_value = dict()", "def send_init_event(self):\n self.status['type'] = '__init__'\n self._send()", "def initialise(self):\n self.device.initialise()\n return \"OK\"", "def __init__(self, device):\n self.device = device\n self.io = serial.Serial(device, 57600, timeout=1)\n self.keys = ['time', 'centroid_x', 'centroid_y', 'centroid_r',\n 'level_1', 'level_2', 'level_3',\n 'width_1', 'width_2', 'width_3',\n 'height_1', 'height_2', 'height_3',\n 'power']", "def __init__(self, ase):\n self.bin = open(ase, 'rb')", "def initialize(self):\n global VERSION_DATE\n\n data = self._request.getData()\n pyhttp = self._request.getHttp()\n config = self._request.getConfiguration()\n\n data[\"pyblosxom_version\"] = VERSION_DATE\n data['pi_bl'] = ''\n\n # Get our URL and configure the base_url param\n if pyhttp.has_key('SCRIPT_NAME'):\n if not config.has_key('base_url'):\n config['base_url'] = 'http://%s%s' % (pyhttp['HTTP_HOST'], pyhttp['SCRIPT_NAME'])\n else:\n config['base_url'] = config.get('base_url', '')\n\n if config[\"datadir\"].endswith(\"\\\\\") or config[\"datadir\"].endswith(\"/\"):\n config['datadir'] = config['datadir'][:-1]\n\n # import and initialize plugins\n import plugin_utils\n plugin_utils.initialize_plugins(config.get(\"plugin_dirs\", []), config.get(\"load_plugins\", None))\n\n # entryparser callback is run here first to allow other plugins\n # register what file extensions can be used\n data['extensions'] = tools.run_callback(\"entryparser\",\n {'txt': blosxom_entry_parser},\n mappingfunc=lambda x,y:y,\n defaultfunc=lambda x:x)", "def init(self):\n self.ready_event = gevent.event.Event()\n\n self.intensity_ranges = []\n 
self.intensity_measurements = []\n\n self.bl_hwobj = self.getObjectByRole(\"beamline_setup\")\n\n try:\n for intens_range in self['intensity']['ranges']:\n temp_intens_range = {}\n temp_intens_range['max'] = intens_range.CurMax\n temp_intens_range['index'] = intens_range.CurIndex\n temp_intens_range['offset'] = intens_range.CurOffset\n self.intensity_ranges.append(temp_intens_range)\n self.intensity_ranges = sorted(self.intensity_ranges,\n key=lambda item: item['max'])\n except:\n logging.getLogger(\"HWR\").error(\\\n \"BeamlineTest: No intensity ranges defined\")\n\n self.chan_intens_mean = self.getChannelObject('intensMean')\n self.chan_intens_range = self.getChannelObject('intensRange')\n\n self.cmd_set_intens_resolution = \\\n self.getCommandObject('setIntensResolution')\n self.cmd_set_intens_acq_time = \\\n self.getCommandObject('setIntensAcqTime')\n self.cmd_set_intens_range = \\\n self.getCommandObject('setIntensRange')", "def __init__( self, dev, port ):\n super( Grove_Light_Sensor, self ).__init__( dev, port )", "def initialize(self):\n self._entity = self.args.get(\"new_entity\")\n icon = self.args.get(\"icon\", \"mdi:motion-sensor\")\n friendly_name = self.args.get(\"friendly_name\", \"Last motion\")\n self._entity_attributes = {\"icon\": icon, \"friendly_name\": friendly_name}\n self._date_format = self.args.get(\"format_last_changed\")\n\n # Set up state history for attributes\n self._history = deque([], maxlen=int(self.args.get(\"max_history\")))\n\n # Listen for binary sensor activations and store friendly names for them\n bin_sensors: Dict[str, str] = self.args.get(\"binary_sensors\")\n self._friendly_names = {}\n for sensor, pretty_name in bin_sensors.items():\n self._friendly_names[sensor] = pretty_name\n self.listen_state(self._bin_sensor_activation, sensor, new=\"on\")\n\n # recover old values, if any\n old_attrs = self.get_state(self._entity, attribute=\"all\")\n if old_attrs:\n state = old_attrs.get(\"state\", \"unknown\")\n for k, old_value in reversed(old_attrs.get(\"attributes\", {}).items()):\n if k.startswith(_ATTR_NAME) and \": \" in old_value:\n self._history.append(old_value)\n # Re-Publish old state\n self._set_new_sensor_state(state)", "def __init__(self, \n address: int = 100, \n moduletype: str = 'EC', \n name: str = 'Atlas_EC_sensor', \n bus: int = 1) -> None:\n # The .initialise method is called in AtlasI2C __init__ \n # to initialise the sensors.\n\n super().__init__(moduletype=moduletype, name=name, address=address, bus=bus)\n\n for param in self._PARAMS: # Ensures that all measurement params are enabled.\n self.query(f'O,{param},1')\n time.sleep(2) # TODO: Test this with no delay, if it works remove line. 
21/07/2021", "def __init__(self):\n self.device_id = None\n self.devices = []\n self.onvif_config = {}", "def initializeOutlet(interface):\n info = StreamInfo('OpenBCI_EEG', 'EEG', 4, 256, 'float32', 'openbci12345')\n outlet = StreamOutlet(info)\n return outlet", "def _initialize_data(self):\n self.unique_id = 123\n\n self.gas_valve_open = False\n self.buffer_valve_open = False\n self.pump_valve_open = False\n\n self.operatingmode = 0\n\n self.sample_pressure_high_limit = 100\n self.sample_pressure_low_limit = 10\n self.sample_pressure = 0\n\n self.error = 0\n\n self.buffer_pressure_high = True", "def __init__(self, device_id, interior_sensor=\"null\", exterior_sensor=\"null\"):\r\n self.device_id = device_id\r\n self.interior_sensor = interior_sensor\r\n self.exterior_sensor = exterior_sensor", "def __init__(self):\n # FIXME: IS this needed?\n super(ArduinoStation, self).__init__()\n\n self.serial_port_pattern = '/dev/ttyACM{port_num}'\n self.serial_port_num = None\n self.baudrate = 9600\n self.ser = self._setup_serial_connection()\n\n\n # Sensor 1 (DHT11) has 2 readings, Sensor 2 has 1\n ## FIXME: Should look for key pairs in list and submit when no more unique readings are coming through\n if config.SCB_CONFIGURATION == 'standard':\n self.lines_per_observation = 3\n else:\n self.lines_per_observation = 7 # Allows for up to 5 DS18B20 along w/ DHT-11.", "def antenny_init_components(self):\n if self.antenny_config is None:\n print(\"Please load a config before initializing components\")\n if not self.antenny_config.check():\n print(\"Config {} is not valid, failed to initialize\".format(self.antenny_config.get_name()))\n print(\"If you believe this is an error, or you have modified the base components of the antenny board, \"\n \"please check Config class as well as the default configs for more details.\")\n\n self.imu_init()\n self.pwm_controller_init()\n self.elevation_servo_init()\n self.azimuth_servo_init()\n self.screen_init()\n self.gps_init()\n self.telemetry_init()\n self.platform_init()", "def init(self):\n self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.IL_MOTOR_ID)\n self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.IR_MOTOR_ID)\n self.l_motor.initialize(\n inverted=False, encoder=False, phase=False, name=\"Intake Left\")\n self.r_motor.initialize(\n inverted=True, encoder=False, phase=False, name=\"Intake Right\")", "def __init__(self, kwargs):\n if 'PoetEnclaveImplementation' in kwargs:\n enclave_module = kwargs['PoetEnclaveImplementation']\n else:\n enclave_module = 'sawtooth_validator.consensus.poet0.' 
\\\n 'poet_enclave_simulator' \\\n '.poet0_enclave_simulator'\n\n poet_enclave = importlib.import_module(enclave_module)\n poet_enclave.initialize(**kwargs)\n WaitCertificate.poet_enclave = poet_enclave\n WaitTimer.poet_enclave = poet_enclave", "def __init__(self):\n self.base_dir = '/sys/bus/w1/devices/'\n self.device_folder = glob.glob(self.base_dir + '28*')[0]\n self.device_file = self.device_folder + '/w1_slave'", "def __init__(self):\n\n GPIO.setup(PIN_BTN, GPIO.IN, GPIO.PUD_UP)\n GPIO.setup(PIN_RED_LED_0, GPIO.OUT, GPIO.LOW)\n GPIO.setup(PIN_BLUE_LED, GPIO.OUT, GPIO.LOW)", "def __init__(self, config):\n self._host = config['host']\n self._username = config['username']\n self._password = config['password']\n self._vc_name = config['Name']\n self._ingest_token = config['IngestToken']\n self._ingest_endpoint = config['IngestEndpoint']\n self._ingest_timeout = config['IngestTimeout']\n self._logger = logging.getLogger(self.get_instance_id())\n self._si = None\n self._connect()\n if self._si is None:\n raise ValueError(\"Unable to connect to host\")\n self._ingest = self._create_signalfx_ingest()\n if self._ingest is None:\n raise ValueError(\"Unable to create ingest client\")\n self._additional_dims = config.get('dimensions', None)\n if 'MORSyncInterval' not in config:\n config['MORSyncInterval'] = constants.DEFAULT_MOR_SYNC_INTERVAL\n self._mor_sync_timeout = config.get('MORSyncTimeout', constants.DEFAULT_MOR_SYNC_TIMEOUT)\n self._metric_sync_timeout = config.get('MetricSyncTimeout', constants.DEFAULT_METRIC_SYNC_TIMEOUT)\n self._inventory_mgr = inventory.InventoryManager(self._si, config['MORSyncInterval'],\n config['Name'], self.get_instance_id())\n self._inventory_mgr.start()\n if 'MetricSyncInterval' not in config:\n config['MetricSyncInterval'] = constants.DEFAULT_METRIC_SYNC_INTERVAL\n self._metric_conf = self._get_metric_config(config)\n self._metric_mgr = metric_metadata.MetricManager(self._si, config['MetricSyncInterval'],\n self._metric_conf, config['Name'], self.get_instance_id())\n self._metric_mgr.start()\n self._wait_for_sync()", "def __init__(self, sensor):\n self.sensor = sensor\n self.sensor.update()", "def __init__(self):\n self.bytes = bytearray(3)\n MCP4725.__init__(self)", "def __init__(self, host):\n self._io = RemoteIO(host)\n self._host = host\n\n self._left_wheel = Wheel(id='b', side='left', remote_io=self._io)\n self._right_wheel = Wheel(id='a', side='right', remote_io=self._io, inverse=True)\n\n self._cam = Camera(host)\n\n self._left_led = LED(side='left', remote_io=self._io)\n self._front_led = LED(side='center', remote_io=self._io)\n self._right_led = LED(side='right', remote_io=self._io)", "def __init__(self, device_label):\n self._device_label = device_label\n self._state = None\n self._digits = hub.config.get(CONF_CODE_DIGITS)\n self._changed_by = None\n self._change_timestamp = 0\n self._default_lock_code = hub.config.get(CONF_DEFAULT_LOCK_CODE)", "def init():", "def _custom_endpoint_init(self, node_config, *argv):\n config = {}\n selector = node_config.get('template', None)\n if not selector:\n selector = argv[0]\n _LOGGER.debug(\" selector: %s\", selector)\n config = {\n \"config_report\": [\n [0x0001, 0x0020, 60, 3600, 5], \n [0x0001, 0x0021, 60, 3600, 5]\n ],\n \"in_cluster\": [0x0000, 0x0001, 0x0500, ],\n \"out_cluster\": [0x0500],\n \"type\": \"binary_sensor\",\n }\n self.add_input_cluster(0x0500)\n self.add_output_cluster(0x0500)", "def _init_hardware(self):\n return", "def _initialize_hardware(self):\n # Import\n try:\n import board\n import 
busio\n import adafruit_vl6180x\n except Exception as ex:\n logging.error(\n '\\n *** ERROR importing Adafruit libraries: {}'.format(\n ex,\n ),\n )\n\n # Things failed, so we must be running locally, not on a widget;\n # don't bother hooking up the VL6180X\n return\n\n # Initialize I2C and VL6180X\n try:\n i2c = busio.I2C(board.SCL, board.SDA)\n self._sensor = adafruit_vl6180x.VL6180X(i2c)\n except Exception as ex:\n logging.error(\n '\\n *** ERROR initializing I2C/LSM303: {}'.format(ex),\n )\n\n self._initialize_id_led()", "def __init__(self):\n self.raw_wires = PyWires.WireNetwork();\n self.__initialize_wires();", "def __init__(self, config):\n spi = SPI(-1, baudrate=config.baudrate,\n sck=config.sck, mosi=config.mosi, miso=config.miso)\n self._epd = epaper2in9.EPD(spi, config.cs, config.dc,\n config.rst1, config.busy)\n self._epd.init()\n self._buffer = Buffer(epaper2in9.EPD_WIDTH, epaper2in9.EPD_HEIGHT)", "def setUpClass(cls):\n cls.nhf = nhflux.NhfluxStream.readBinary(SIMPLE_HEXZ_NHFLUX)", "def __init__(self):\n super().__init__()\n\n # Gadget state\n \n self.isDoorOpen = False\n self.verified = True\n\n # Ev3dev initialization\n self.leds = Leds()\n self.sound = Sound()\n self.drive = MoveTank(OUTPUT_B, OUTPUT_C)\n \n self.ir_sensor = InfraredSensor()\n self.ir_sensor.mode = self.ir_sensor.MODE_IR_REMOTE\n self.color_sensor = ColorSensor()\n self.color_sensor.mode = 'COL-COLOR' # WHITE\n\n # Start threads\n threading.Thread(target=self._patrol_thread, daemon=True).start()", "async def initialize(self, response: bytes) -> None:\n try:\n temp_power = hexlify(response)[154:162]\n self._power_consumption = int(\n temp_power[2:4] + temp_power[0:2], 16)\n self._electric_current = round(\n (self._power_consumption / float(220)), 1)\n\n temp_time_left = hexlify(response)[178:186]\n temp_time_left_seconds = int(temp_time_left[6:8]\n + temp_time_left[4:6]\n + temp_time_left[2:4]\n + temp_time_left[0:2], 16)\n self._time_to_auto_off = await convert_seconds_to_iso_time(\n self._loop,\n temp_time_left_seconds)\n\n temp_auto_off = hexlify(response)[194:202]\n temp_auto_off_seconds = int(temp_auto_off[6:8]\n + temp_auto_off[4:6]\n + temp_auto_off[2:4]\n + temp_auto_off[0:2], 16)\n self._auto_off_set = await convert_seconds_to_iso_time(\n self._loop,\n temp_auto_off_seconds)\n\n temp_state = hexlify(response)[150:154].decode(ENCODING_CODEC)\n self._state = STATE_ON if temp_state == STATE_RESPONSE_ON \\\n else STATE_OFF if temp_state == STATE_RESPONSE_OFF \\\n else STATE_UNKNOWN\n\n self.init_future.set_result(self)\n except HANDLED_EXCEPTIONS as exc:\n self.init_future.set_exception(exc)\n\n return None", "def __init__(self):\n # Global attributes\n self.ON = {\"RED\":[0], \"GREEN\":[2], \"YELLOW\":[4], \"BLINK\":[6], \"NORMAL\":[2], \"WARNING\":[2,6], \"CRITICAL\":[4], \"ERROR\":[0]}\n self.OFF = {\"RED\":[1], \"GREEN\":[3], \"YELLOW\":[5], \"BLINK\":[5], \"NORMAL\":[3], \"WARNING\":[3,5], \"CRITICAL\":[5], \"ERROR\":[1]}\n\n # Indicator topic\n topic = rospy.get_param(rospy.get_name() + \"/indicator_topic\", \"/tower_lights_cmd\")\n # Namespace fixing\n if (topic[0] != '/'): topic = rospy.get_name() + \"/\" + topic\n\n # Starting publisher\n self.indicator_publisher = rospy.Publisher(topic, Int32, queue_size=100)\n rospy.sleep(0.8) # Publisher initialization tiom\n\n # Turn off all indications\n for state in self.OFF:\n for cmd in self.OFF[state]:\n self.publish_cmd(cmd)\n \n # Start indicator thread\n self.event = threading.Condition()\n thread = threading.Thread(target=self.indicator_thread)\n 
thread.start()\n\n # Initialize default indication\n self.current_indication = \"NORMAL\"\n self.indication = \"NORMAL\"\n for i in self.ON[self.current_indication]:\n self.publish_cmd(i)", "def __init__(self):\n config = self.read_config()\n self.deployment = config['deployment']\n self.deployment_config = config[self.deployment]\n logger.info(f'Initializing storage client with the {self.deployment} deployment config {pformat(self.deployment_config)}')\n\n # get the MLOS config from the user else default it from the deployment config file\n # self.mlos_config = config['MLOS']\n # logger.info(f'Initializing storage client with the MLOS config {pformat(self.mlos_config)}')\n\n # setup the mount path\n if self.deployment == \"LOCAL\":\n self.mount_dir = self.setup_mount()\n logger.info(f'Mount directory setup completed: {self.mount_dir}')", "def __init__(self, hass):\n self.hass = hass\n self._volume = 0\n self._state = STATE_OFF", "def __init__(self, hass):\n self.hass = hass\n self._volume = 0\n self._state = STATE_OFF", "def setUp(self):\n self.platform = wirelesstagpy.WirelessTags(username=USERNAME, password=PASSWORD)\n self.tag_outdoor = wirelesstagpy.SensorTag(MOCK.OUTDOOR_PROBE, self.platform)\n self.platform._tags[\"fake-1\"] = self.tag_outdoor # pylint: disable=protected-access", "def __init__(self, logger):\n\n # cwd = os.getcwd()\n # parent_dir = os.path.dirname(cwd)\n # self.logger = logger_variable(__name__, parent_dir + '/log_files/SensorData.log')\n\n # open serial port\n self.interrupt_pin = 19\n self.serialOpen = Serial('/dev/ttyACM1', 115200)\n\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.interrupt_pin, GPIO.OUT, initial=1)\n\n self.logger = logger", "def __init__(self):\n \n # Initialize logger\n self._log = logging.getLogger(\"OemGateway\")\n \n # Initialize variables\n self._data_buffer = []\n self._last_send = time.time()\n self._settings = {}" ]
[ "0.6457708", "0.64529383", "0.61910915", "0.604235", "0.60021144", "0.59438455", "0.59427816", "0.59158796", "0.59125113", "0.5882629", "0.58545095", "0.5852518", "0.5806404", "0.5801691", "0.58013505", "0.57857406", "0.5775398", "0.57515305", "0.573593", "0.57229674", "0.5712897", "0.57005817", "0.56772864", "0.5676447", "0.56543714", "0.56512475", "0.5649992", "0.5647747", "0.56327385", "0.56250566", "0.5624253", "0.5617689", "0.5598231", "0.55813265", "0.5578191", "0.5570919", "0.55517524", "0.5541415", "0.5538924", "0.55331796", "0.553102", "0.55214053", "0.55020726", "0.54918534", "0.54905415", "0.54902065", "0.5482773", "0.5478406", "0.54687923", "0.5465591", "0.5462672", "0.5458011", "0.54495007", "0.54454213", "0.5440609", "0.5438318", "0.5436346", "0.5434731", "0.5431029", "0.54236037", "0.5423174", "0.54225326", "0.5420044", "0.54134965", "0.541269", "0.5402173", "0.5399292", "0.5398854", "0.53975976", "0.5396446", "0.5386515", "0.53799254", "0.5375222", "0.5375035", "0.5369094", "0.53679556", "0.53568786", "0.5351214", "0.53466916", "0.53444225", "0.5339552", "0.5332023", "0.5328071", "0.5316617", "0.53161967", "0.5313317", "0.5308701", "0.5305977", "0.53017193", "0.52969396", "0.5296323", "0.52961934", "0.52927345", "0.5292237", "0.52888423", "0.5288252", "0.5288252", "0.5287939", "0.52781713", "0.52781093" ]
0.5603905
32
The default name for the binary sensor.
def name(self): return self.devname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self):\n return f\"{DEFAULT_NAME}_{BINARY_SENSOR}\"", "def name(self):\n return f\"{self._name} {SENSOR_TYPES[self.sensor][0]}\"", "def name(self):\n return f\"{self.sensor_type['name']} ({self._mac[-5:]})\"", "def name(self):\n return f\"{self._name}_{self._sensor}\"", "def name(self):\n return self._sensor.name", "def getDefaultName(self): # real signature unknown; restored from __doc__\n pass", "def name(self):\n return self.device.device_data[self.device_id]['name'] + \\\n f' {self._sensor_type}'", "def name(self):\n return f\"{self._name} {self._sensor_name}\"", "def get_sensor_name(self):\n return self.data[1]", "def name(self) -> str:\n return f\"{self.platform_name} {self._sensor_name}\"", "def default_name(self):\n return '[' + self.__class__.__name__ + ']'", "def default_name(self):\n name = f\"Player {self.UID.split('-')[0]}\"\n return name", "def name(self):\n return f\"{habitica.DOMAIN}_{self._name}_{self._sensor_name}\"", "def name(self):\n if self._name == '':\n return self.default_name\n else:\n return self._name", "def LegacyName(self, default=None):\n return self.data.get('legacy_name', default)", "def Name(self, default=None):\n return self.data.get('name', default)", "def name(self) -> str:\n return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME", "def name(self):\n return f\"{self._tc_object.name} {SENSOR_TYPES[self.type][0]}\"", "def name(self):\n return f\"{get_device_name(self._data, 0, self._name)}\"", "def get_default_sensor_type():\n return get_sensor_type_id(DEFAULT_SENSOR_TYPE)", "def name(self):\n if self._name is None:\n return(self.default_name)\n else:\n return(self._name)", "def name(self):\n return f\"{self.device_name} {self.device_variable}\"", "def name(self):\n if ( self._typeSensor == _production):\n name = \"myEnedis.%s.production\" %(self._myDataSensorEnedis.get_PDL_ID())\n else:\n name = \"myEnedis.%s\" %(self._myDataSensorEnedis.get_PDL_ID())\n return name", "def friendly_name(self):\n return self._sensor_name", "def name(self):\n return str()", "def name(self):\n # self._name = \"wyzeapi_\"+self._device_mac+\"_\"+ self._name\n return self._device.nickname", "def name(self) -> str:\n return self._device.name or self._device.mac", "def name(cls):\n return None", "def get_name(self):\n return None", "def name() -> str:\n pass", "def name():\n pass", "def name():\n pass", "def name(self) -> str:\n return self.dev.label", "def name(self):\n return f\"{super().name} Battery\"", "def get_name() -> str:\n pass", "def getName(self):\n return \"\"", "def name(self):\n return f'{self._vehicle.name} {self.wan_name} Signal'", "def name(self):\n return None", "def name(self):\r\n return None", "def name(self):\n return self._device.device_data[self._uuid]['name']", "def name(self):\n return self.config.get('name') or f\"{self.id.replace('_', ' ').title()}\"", "def name(self):\n return self.config.get('name') or f\"{self.id.replace('_', ' ').title()}\"", "def default_label(self) -> str:\n return self.settings[\"default_label\"]", "def name(self):\n return self.robot.name + ' ' + SWITCH_TYPES[self.type][0]", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def define_name(self):\n\n self._name = 'dwi-noddi'", "def name(self) -> str:\n\t\traise NotImplementedError", "def name(self):\n return f'{self._config[CONF_NAME]} {self._typeconf[\"name\"]}'", "def get_name(self) -> str:\n pass", "def name(self) -> str:\n station_name = 
self._get_station_name()\n return f\"{station_name} {self._fuel_type}\"", "def name ( self ) :\n return self.__name if self.__name else ''", "def name(self):\n raise NotImplementedError # pragma: no cover", "def name(self):\n return 'Null'", "def get_name():", "def name(self):\n pass", "def name(self):\n return self._device.name", "def name(self):\n return self._device.name", "def name(self):\n return self._device.name", "def name():\n raise NotImplementedError", "def name():\n raise NotImplementedError", "def get_name(self):\n name_str = \"Brain\"\n name_str += \"_\" + self._memory.get_name() \n name_str += \"_ImgSize\" + str(self._img_size[0])\n name_str += \"_Nov\" + self._novelty_loss_type.upper()\n name_str += \"_Train\" + str(self._train_epochs_per_iter)\n name_str += \"_Lrate\" + str(self._learning_rate)\n return name_str", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self):\n return self.device.device_data[self.device_id]['name']", "def name(self):\n return self._name if self._name is not None else (\"0x%x\" % id(self))", "def default_name():\n return \"object{}\".format(Object.TOTAL_OBJECTS + 1)", "def name(self):\n if self._connection.location_names:\n return '{} {} {}'.format(self._device.location2, self._device.location, self._device.name)\n else:\n return self._device.name", "def name(self):\n return f\"PoolSense {BINARY_SENSORS[self.info_type]['name']}\"", "def name(self):\n return self._base.name", "def name(self):\n return self.device.name()", "def name(self):\r\n pass", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def get_name(self):\n pass", "def get_name(self):\n pass", "def name(self) -> str | None:\n pass", "def name(self) -> str:\n raise NotImplementedError", "def name(self) -> str:\n raise NotImplementedError()", "def _create_name(self) -> str:\n return self.stream.__class__.__name__", "def get_name(self) -> str:\n raise NotImplementedError", "def name(self):\n if not self._name:\n prefix = self.random.choice(['Desktop'] * 4 + ['Laptop'])\n self._name = '{}-{}'.format(prefix, ''.join(\n self.random.choice(string.ascii_uppercase + string.digits) for _ in range(7)))\n return self._name", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError" ]
[ "0.88304996", "0.7638713", "0.7363179", "0.73616314", "0.7301961", "0.7292507", "0.7264081", "0.7222781", "0.71043813", "0.70989096", "0.7048955", "0.7047866", "0.70316464", "0.701503", "0.69721663", "0.6962419", "0.6932336", "0.6925805", "0.6794133", "0.67933375", "0.6758342", "0.66250587", "0.651088", "0.6510359", "0.6505337", "0.6485595", "0.6465294", "0.6439318", "0.641589", "0.6412369", "0.6405634", "0.6405634", "0.63938355", "0.63918304", "0.6389657", "0.6389556", "0.6370057", "0.6369323", "0.6350224", "0.6338894", "0.6337681", "0.6337681", "0.6333663", "0.63202816", "0.6304005", "0.6304005", "0.6304005", "0.6304005", "0.6300747", "0.6300391", "0.62978536", "0.6289581", "0.62848645", "0.6268498", "0.6266008", "0.62609094", "0.6256421", "0.6253092", "0.6250601", "0.6250601", "0.6250601", "0.6246399", "0.6246399", "0.6244284", "0.62400174", "0.62400174", "0.62400174", "0.62400174", "0.62400174", "0.62397575", "0.623646", "0.62352973", "0.622941", "0.62249583", "0.6215581", "0.6211411", "0.6202246", "0.62002313", "0.62002313", "0.62002313", "0.62002313", "0.62002313", "0.62002313", "0.61993676", "0.61993676", "0.619259", "0.61924446", "0.6187689", "0.6181795", "0.61742294", "0.6167042", "0.6163514", "0.6163514", "0.6163514", "0.6163514", "0.6163514", "0.6163514", "0.6163514", "0.6163514", "0.6163514", "0.6163514" ]
0.0
-1
Fire an event with the data that have changed. This method is called when there is an incoming packet associated with this platform.
def value_changed(self, value, value2): self.update_ha_state() if value2 == 0x70: self.which = 0 self.onoff = 0 elif value2 == 0x50: self.which = 0 self.onoff = 1 elif value2 == 0x30: self.which = 1 self.onoff = 0 elif value2 == 0x10: self.which = 1 self.onoff = 1 self.hass.bus.fire('button_pressed', {"id": self.dev_id, 'pushed': value, 'which': self.which, 'onoff': self.onoff})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_changed(self):\n self.data_changed_signal.emit(self)", "def notify(self, packet):\n\t\tself.update_listeners(packet)", "def on_state_notification(self, data):\n\n self.channel_data.update(data)\n\n # synchronize DataManager data with processed update & entity data\n self.sync_data_update_ha()", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def data_changed(self):\n return", "def on_data(self, event_data: LivisiEvent) -> None:\n if event_data.onState is not None:\n async_dispatcher_send(\n self.hass,\n f\"{LIVISI_STATE_CHANGE}_{event_data.source}\",\n event_data.onState,\n )\n if event_data.isReachable is not None:\n async_dispatcher_send(\n self.hass,\n f\"{LIVISI_REACHABILITY_CHANGE}_{event_data.source}\",\n event_data.isReachable,\n )", "def MyDataChangedCallback(self, inRefcon):\r\n pass", "def datachange_notification(self, node: Node, val, data):\n _logger.info('datachange_notification %r %s', node, val)", "def __flight_data_handler(self, event, sender, data):\n self.battery = data.battery_percentage\n self.fly_mode = data.fly_mode\n self.throw_fly_timer = data.throw_fly_timer\n self.throw_ongoing = data.throw_fly_timer > 0\n\n if self.prev_flight_data != str(data):\n print(data)\n self.prev_flight_data = str(data)\n self.flight_data = data\n\n if self.is_flying != data.em_sky:\n self.is_flying = data.em_sky\n log.debug(f\"FLYING : {self.is_flying}\")\n if not self.is_flying:\n self.reset()\n else:\n if self.tracking_after_takeoff:\n log.info(\"Tracking on after takeoff\")\n self.toggle_tracking(True)\n\n # if self.write_header_log:\n # self.write_header_log = False\n # self.log_file_log.write(f\"{data.format_cvs_header()}\\n\")\n # self.log_file_log.write(f\"{data.format_cvs(0)}\\n\")", "def data_received(self, data):\n pass", "def XPLMDataChanged_f(inRefcon):", "def update(self, packet):\n raise NotImplementedError", "def change_data(self):\n\n if self.changed is not True:\n self.changed = True\n print('True')", "def fire(self, data={}):\n event_data = {\n 'event': 'CameraUpdated',\n 'component_id': self.id,\n 'name': self.name,\n 'updated_at': str(datetime.datetime.now().replace(microsecond=0)),\n 'state': self.state,\n 'last_image': self.last_image,\n }\n event_data.update(data)\n self.mudpi.events.publish(NAMESPACE, event_data)", "def on_new_data(self, data):\n raise NotImplementedError()", "def write_data(self, data):\n # send data\n for ptr, value in data:\n self._write_byte(ptr, value)\n # set 'data changed'\n self._write_byte(fixed_format['data_changed'][0], 0xAA)\n # wait for station to clear 'data changed'\n while True:\n ack = _decode(self._read_fixed_block(0x0020),\n fixed_format['data_changed'])\n if ack == 0:\n break\n log.debug('waiting for ack')\n time.sleep(6)", "def data_received(self, data):\n # self.debug(\"received data=%r\", binascii.hexlify(data))\n self.dispatcher.add_data(data)", "def dataReceived(self, data):\n print \"received:\", data", "def dataReceived(self, data):", "def __handle_pkt_event(self, current_client, data):\n self.__logger.debug(\n \"Got event from the client {}: {}\".format(current_client.addr, data))\n\n # TODO: Save the event to the DB\n self.__broadcast_events(current_client, [wrap_event(data)])", "def _handle_MonitorData (self, event, packet, reverse):\n pass", "def __itemChanged(self, event):\n if event in (items.ItemChangedType.DATA, items.ItemChangedType.MASK):\n self._updateFromItem()", "def update(self, _event):\n print self.get_name(), \"received 
event\", _event", "def set_changed(self, value=0):\n self.data_changed.emit(value)\n self._changed = True", "def rDataChanged(self):\n\n self._queues.uResolutionTab.refreshData()\n self._layerManager.updateReviewLayer()", "def datachange_notification(self, node, val, data):\n \n logger.debug(\"New data change event. node:{}, value:{}\".format(node, val))\n \n # Sorry about these lines of code, but I don't see any nicer way of determining the port number than from \n # the identifier string. Then splitting it up to isolate the port number.\n # Example \"Status.Port_2.Selected\" is split into ['Status', 'Port_2', 'Selected'] then 'Port_2' is split into \n # ['Port', '2'] and then the '2' is turned into an intiger.\n path_list = str(node.nodeid.Identifier).split(\".\")\n\n # We can safely assume that the last term is the tag that updated.\n tag = path_list[-1] \n \n # Figure out the port number\n port_number = None\n if 'Port' in path_list[1]:\n port_number = int(path_list[1].split(\"_\")[-1]) \n \n \"\"\" Switch for each possible tag\"\"\"\n # If the command tag \"Select\" changes go select that port with the instructions saved in the command tag. \n if tag == 'Select' and port_number:\n if val == True:\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Instructions\".format(port_number))\n instructions = node.get_value()\n self._pbl.select_port(port_number, instructions=instructions)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Select\".format(port_number))\n node.set_value(False)\n \n elif tag == 'Deselect' and port_number:\n if val == True:\n self._pbl.deselect_port(port_number, work_finished=True)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Deselect\".format(port_number))\n node.set_value(False)\n\n elif tag == 'ContentDisplayName' and port_number:\n self._pbl.set_content_key(port_number,'display_name', str(val))\n elif tag == 'ContentName' and port_number:\n self._pbl.set_content_key(port_number,'name', str(val))\n elif tag == 'ContentDescription' and port_number:\n self._pbl.set_content_key(port_number,'description', str(val))\n elif tag == 'ContentImagePath' and port_number:\n self._pbl.set_content_key(port_number,'image_path', str(val))\n \n elif tag == 'Select' and 'ByContent' in path_list[1]:\n if val == True:\n instructions = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Instructions\").get_value()\n name = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Name\").get_value()\n _, selected_port = self._pbl.select_content(name = name, instructions=instructions)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Select\")\n node.set_value(False)\n node = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Result\")\n node.set_value(selected_port)\n\n elif tag == 'Deselect' and 'ByContent' in path_list[1]:\n if val == True:\n name = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Name\").get_value()\n self._pbl.deselect_content(name = name, work_finished=True)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Deselect\")\n node.set_value(False)", "def callback(self, packet, sender=None):\n pass", "def receive(self, event: str, sender: object, **kwargs) -> None:\n if self._lookup is not None:\n if event == \"HILBERTSPACE_UPDATE\" and sender is self._hilbertspace:\n self._lookup._out_of_sync = True\n # print('Lookup table now out of sync')\n elif event == \"PARAMETERSWEEP_UPDATE\" and sender is self:\n 
self._lookup._out_of_sync = True\n # print('Lookup table now out of sync')", "def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()", "def on_push(self, data):\n if data[\"type\"] == \"push\":\n self._data = data[\"push\"]", "def on_data(data: dict):\n if 'symbol' in data and 'messageType' in data:\n if data['symbol'] == 'AAPL' and data['messageType'] == 'tradingstatus':\n print(\"APPL initial message received\")\n event1.set()\n if data['symbol'] == 'KPTI' and data['messageType'] == 'tradingstatus':\n print(\"KPTI initial message received\")\n event2.set()", "def new_loop_packet(self, event):\n # packet has traveled back in time\n if self.end_ts > event.packet['dateTime']:\n self.logger.error(\"Service ignoring packet has dateTime of %f which is prior to previous packet %f\"\n %(event.packet['dateTime'], self.end_ts))\n else:\n start_ts = self.end_ts\n self.end_ts = event.packet['dateTime']\n\n for topic in self.subscriber.subscribed_topics: # topics might not be cached.. therefore use subscribed?\n self.logger.debug(\"Service packet prior to update is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(event.packet['dateTime']),\n to_sorted_string(event.packet)))\n target_data = self.subscriber.get_accumulated_data(topic,\n start_ts, self.end_ts, event.packet['usUnits'])\n event.packet.update(target_data)\n self.logger.debug(\"Service packet after update is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(event.packet['dateTime']),\n to_sorted_string(event.packet)))", "def dataReceived(self, data):\n if not self.disconnected:\n self.protocol.dataReceived(data)", "def _data_updated_callback(self, attr, old, new):\n pass", "def data_input_changed(self):\n self.message.data = self.dataInput.toPlainText()\n self.validate_data_input(self.message.dlc)", "def _async_process_data(self):\n _LOGGER.debug(\"Update switch called\")\n\n data = self._api.get_device_data(self._dev_id)\n\n if not data:\n _LOGGER.error(\"Received no data for device %s\", self._name)\n self.async_write_ha_state()\n return\n\n if \"relay\" in data:\n self._is_on = data[\"relay\"]\n\n self.async_write_ha_state()", "def receive(self, event, sender, **kwargs):\n if self.lookup is not None:\n if event == 'HILBERTSPACE_UPDATE' and sender is self._hilbertspace:\n self._lookup._out_of_sync = True\n # print('Lookup table now out of sync')\n elif event == 'PARAMETERSWEEP_UPDATE' and sender is self:\n self._lookup._out_of_sync = True\n # print('Lookup table now out of sync')", "def flight_data_handler(self, event, sender, data):\n self.battery = data.battery_percentage\n self.fly_mode = data.fly_mode\n self.throw_fly_timer = data.throw_fly_timer\n self.throw_ongoing = data.throw_fly_timer > 0\n\n # print(\"fly_mode\",data.fly_mode)\n # print(\"throw_fly_timer\",data.throw_fly_timer)\n # print(\"em_ground\",data.em_ground)\n # print(\"em_sky\",data.em_sky)\n # print(\"electrical_machinery_state\",data.electrical_machinery_state)\n #print(\"em_sky\",data.em_sky,\"em_ground\",data.em_ground,\"em_open\",data.em_open)\n #print(\"height\",data.height,\"imu_state\",data.imu_state,\"down_visual_state\",data.down_visual_state)\n if self.is_flying != data.em_sky: \n self.is_flying = data.em_sky\n log.debug(f\"FLYING : {self.is_flying}\")\n if not self.is_flying:\n self.reset()\n else:\n if self.tracking_after_takeoff:\n log.info(\"Tracking on after takeoff\")\n self.toggle_tracking(True)\n \n log.debug(f\"MODE: {self.fly_mode} - Throw fly timer: {self.throw_fly_timer}\")", "def handle_input(self):\n difference = self.check_state()\n 
if not difference:\n return\n self.events = []\n self.handle_new_events(difference)\n self.update_timeval()\n self.events.append(self.sync_marker(self.timeval))\n self.write_to_pipe(self.events)", "def triggerPacket(self, ctx):\n return self.dev.trigger(ctx)", "def changed(self, event: Event):\n\n for observer in self._observers:\n observer.on_change(event)", "def update(self, new_gameStateData):\r\n pass", "def on_data(self, session, byte_data):\n pass", "def on_entity_update(self, event):\n self.entity.on_entity_update(event)", "def handle_actual_updated(self):\n self._actual_updated()", "def handle_update(self, call):\n self.fire_event(EVENT_UPDATE)", "def do_on_input_update(self, msg_id, payload, player):\n pass", "def changed_event(self):\n return True", "def _on_host_msg_received(self, data):\n self._log.info(\"Received a message from the host: {}\".format(data))\n data = json.loads(data)", "def on_data(self, event):\n if not self.quitting:\n self.the_server.process_ready_socks([event.socket_ID])", "def updateData(self):\n self.needsData.emit(self.property(\"number\"))", "def update(self, data: bytes):\n self.send(data)", "def process_event(self, event):\n if not self.frozen:\n if event[\"event\"] in [self.event, self.devent]:\n if self.what is None or event[\"target\"].startswith(self.what):\n self.lastone = None\n if event[\"event\"] == self.event:\n val = event\n for key in self.subval:\n val = val[key]\n if val in self.addV:\n if event[\"target\"] not in self.entities:\n self.entities.add(event[\"target\"])\n self.lastone = event[\"target\"]\n if event[\"event\"] == self.devent:\n val = event\n for key in self.dsubval:\n val = val[key]\n if val in self.subV:\n try:\n self.entities.remove(event[\"target\"])\n self.lastone = event[\"target\"]\n except:\n pass\n if self.lastone is not None and bridgectl.log:\n bridgectl.log.debug(\n \"New value for {} is {}\".format(\n self.name,\n self.entities))\n if event['event'] == 'time tick':\n if self.period in event[\"starts\"]:\n self.reset()", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def listen_and_send(self):\n hadEvent = False\n\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value, 2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n if event.type == pygame.JOYBUTTONDOWN:\n # A button on the joystick just got pushed down\n hadEvent = True\n elif event.type == pygame.JOYAXISMOTION:\n # A joystick has been moved\n hadEvent = True\n\n if hadEvent:\n\n # If platform is linux we need to change some values in axis_data\n os.system('clear')\n print(\"Axis before\")\n pprint.pprint(self.axis_data)\n if sys.platform == 'linux':\n #self.axis_data[2], self.axis_data[3], self.axis_data[4] = self.axis_data[4], self.axis_data[2], self.axis_data[3]\n temp2 = self.axis_data[2]\n temp3 = self.axis_data[3]\n temp4 = self.axis_data[4]\n self.axis_data[2] = temp4\n 
self.axis_data[3] = temp2\n self.axis_data[4] = temp3\n\n\n self.event_dict['axis'] = self.axis_data\n self.event_dict['button'] = self.button_data\n message = pickle.dumps(self.event_dict, protocol=4)\n message = bytes(f\"{len(message):<{HEADERSIZE}}\", 'utf-8') + message\n self.sock.sendall(message)\n\n #if self.button_data[4]:\n # self.verbose = not self.verbose\n\n if self.verbose:\n\n # print(\"Button \")\n # pprint.pprint(self.button_data)\n print(\"Axis \")\n pprint.pprint(self.axis_data)\n # print(\"Motion \")\n # pprint.pprint(self.hat_data)", "def notify(self):\n if self.has_changed:\n event = self.event_factory(self.resource, registry=self.registry, schema=self.schema,\n changed=self.changed)\n self.registry.notify(event)", "def on_data(self, data):\n # store the data\n self._storage.append(data)", "def changedUpdate(self, e):\n syncJSONtoUI()", "def notify_observers(self, new_gamestate) -> None:", "def update_listeners(self, packet):\n\t\tpacket.module_id = self.id\n\t\tself._notify_listeners(packet)", "def notify(self, data):\n\n if 'personId' in data.keys():\n person_id = data['personId']\n if data['type'] == EventTimeLine.PERSON_CREATION:\n self._registry[person_id] = {\n 'name': data['name'],\n 'address': data['address'],\n 'status': data['status'],\n 'version': 1\n }\n\n if data['type'] == EventTimeLine.PERSON_STATUS_CHANGE:\n p = self._registry[person_id]\n p['status'] = data['newStatus']\n p['version'] += 1\n\n if data['type'] == EventTimeLine.PERSON_MOVE:\n p = self._registry[person_id]\n p['address'] = data['newAddress']\n p['version'] += 1", "def notify(self, event):\n raise NotImplementedError", "def outReceived(self, data):\n self.protocol.dataReceived(data)", "def _on_packet_motor_encoder(self, packet):\n try:\n packet_dict = self.get_packet_dict(packet)\n if not packet_dict:\n return\n except (ValueError, TypeError) as e:\n return\n\n channel = packet_dict['channel']\n count = packet_dict['count']\n if channel == 0:\n self.wheel_tracker.update_left(count)\n else:\n self.wheel_tracker.update_right(count)", "def update(self):\n self.events.update()", "def event_data(self, event_data):\n\n self._event_data = event_data", "def notify_change(self, change):\n # Send the state to the frontend before the user-registered callbacks\n # are called.\n name = change['name']\n if self.comm is not None and getattr(self.comm, 'kernel', True) is not None:\n # Make sure this isn't information that the front-end just sent us.\n if name in self.keys and self._should_send_property(name, getattr(self, name)):\n # Send new state to front-end\n self.send_state(key=name)\n super().notify_change(change)", "def reload_data(self):\n super(UpdateMessage, self).reload_data()\n self._previous_avro_payload.reload_data()", "def on_update(self, delta_time):\n pass", "def on_update(self, delta_time):\n pass", "def on_event(self, event):\r\n\r\n print(\"on event called, event:\", event)\r\n\r\n self.state = self.state.on_event(event)\r\n publish_state_msg(state_msg, odrive_bridge.get_state())", "def event_in_cb(self, msg):\n self.event = msg.data", "def handle_packet_received(self, packet):\r\n log.debug(packet)\r\n self._process_packet(packet)\r\n self.emit(packet.fctype, packet)\r\n self.emit(FCTYPE.ANY, packet)", "async def data_received(self, data: bytes):\n logging.info('received: %s' % data.decode())", "def update_data():\n pass", "def on_message(self, message):\n log.debug(\"Protocol got message {message}\", message=message)\n if message['type'] == \"change\":\n 
self.handler.process_packet(message['packet'])\n self.send_packet()\n elif message['type'] == \"chat\":\n self.on_chat_message(message)\n elif message['type'] == \"action\":\n self.on_action(message)\n else:\n log.warn(\"Unrecognized message type {type}\", type=message['type'])", "def data_received(self, event):\n stream_id = event.stream_id\n\n log.debug(\"data received on stream %s: %s...\", stream_id, event.data[:100])\n receive_stream = self.receive_streams.get(stream_id)\n if receive_stream is None:\n try:\n self.conn.reset_stream(stream_id, error_code=ErrorCodes.PROTOCOL_ERROR)\n except StreamClosedError:\n pass\n return\n\n receive_stream.write(event.data)\n self.conn.acknowledge_received_data(event.flow_controlled_length, stream_id)", "def on_update(self):\n raise NotImplemented(\"on_update method should be implemented.\")", "def onMarketUpdate(self, data):\n pass", "def packetReceived(self, packet):\n for layer in packet:\n if (layer.layer_name == 'fmtp' and\n int(layer.type) == 1):\n # Data is stored as a hexadecimal string in the XML file\n # generated by tshark\n data = binascii.unhexlify(layer.data)\n log.msg(\"FMTP message received: {}\".format(data))", "def _notify_new_log(self, data):\n # Notify listeners\n for lstnr in self.listeners:\n lstnr.new_log_data(self, data)", "def update(self):\n\n self.check_events()", "def process_event(self, event):\n if not self.frozen:\n if event[\"event\"] == self.event:\n if self.what is None or event[\"target\"].startswith(self.what):\n self._varstate = event\n try:\n for key in self.subval:\n self._varstate = self._varstate[key]\n\n if bridgectl.log:\n bridgectl.log.debug(\n \"New value for {} is {}\".format(\n self.name,\n self._varstate))\n except Exception as e:\n if bridgectl.log:\n bridgectl.log.critical(\n \"Failed to process event for {}\".format(\n self.name),\n exc_info=(type(e),\n e,\n e.__traceback__))\n pass\n if event['event'] == 'time tick':\n if self.period in event[\"starts\"]:\n self._varstate = self.reset()", "def update_data(self, **kwargs):\n self.source_data = self.get_dict()\n for c in self.callbacks[\"update_data\"]:\n c()", "def collect_incoming_data(self, data):\n self.l.debug('data -> (%d bytes):\"%s\"', len(data), data)\n self.received_data.append(data)", "def on_push(self, payload):\n pass", "def _on_packet_imu_accelerometer(self, packet):\n\n try:\n packet_dict = self.get_packet_dict(packet)\n if not packet_dict:\n return\n except (ValueError, TypeError) as e:\n return\n\n self.last_accelerometer = packet_dict\n\n self.received_imu = True", "def callback(self, data):\n self.state = data.data\n #rospy.loginfo('HEARD')", "def datagramReceived(self, data):\n raise NotImplementedError()", "def receive(self, timestamp, whoiam, packet):\n raise NotImplementedError(\"Please override this method when subclassing RobotObjectCollection\")", "def onFlowUpdate(self, event):", "def data_received(self, data):\n self.log.debug('data_received: {!r}'.format(data))\n self._last_received = datetime.datetime.now()\n for byte in (bytes([value]) for value in data):\n\n try:\n self.stream.feed_byte(byte)\n except (ValueError, AssertionError):\n e_type, e_value, _ = sys.exc_info()\n map(self.log.warn,\n traceback.format_exception_only(e_type, e_value))\n continue\n\n if self.stream.is_oob:\n continue\n\n # self.reader.feed_byte()\n self.shell.feed_byte(byte)", "def on_watch(self, payload):\n pass", "def notify(self, event):\n\n self.send_json(event[\"payload\"])", "def notify_device_changes(self, device, attribute_name, old_value, 
new_value):\n if not self.configured:\n return\n\n self.machine.bcp.transport.send_to_clients_with_handler(\n handler=\"_devices\",\n bcp_command='device',\n type=device.class_label,\n name=device.name,\n changes=(attribute_name, Util.convert_to_simply_type(old_value), Util.convert_to_simply_type(new_value)),\n state=device.get_monitorable_state())", "def rawDataReceived(self, data):\n self._buffer.append(data)\n self._bufferLength += len(data)\n\n if self._bufferLength >= self._expectedLength:\n receivedData = ''.join(self._buffer)\n expectedData = receivedData[:self._expectedLength]\n extraData = receivedData[self._expectedLength:]\n\n self._buffer = None\n self._bufferLength = None\n self._expectedLength = None\n\n self.datagramReceived(expectedData)\n self.setLineMode(extraData)", "def handle_input(self, event):\n self.update_timeval()\n self.events = []\n code = self._get_event_key_code(event)\n\n if code in self.codes:\n new_code = self.codes[code]\n else:\n new_code = 0\n event_type = self._get_event_type(event)\n value = self._get_key_value(event, event_type)\n scan_event, key_event = self.emulate_press(\n new_code, code, value, self.timeval)\n\n self.events.append(scan_event)\n self.events.append(key_event)\n # End with a sync marker\n self.events.append(self.sync_marker(self.timeval))\n # We are done\n self.write_to_pipe(self.events)", "def update(self, new_gameStateData):\r\n self.data = new_gameStateData\r\n self._refresh()" ]
[ "0.70797074", "0.68603", "0.6457842", "0.6428471", "0.64076215", "0.63471305", "0.63120216", "0.62468994", "0.62399685", "0.6223958", "0.61906064", "0.618284", "0.61713535", "0.6107015", "0.6090246", "0.597505", "0.59542114", "0.5942936", "0.59262264", "0.5895548", "0.5893129", "0.588955", "0.5876569", "0.58615005", "0.585497", "0.5847317", "0.5836464", "0.58035684", "0.5801525", "0.57820165", "0.5767553", "0.576277", "0.57552546", "0.57410806", "0.5740662", "0.57380116", "0.5734382", "0.571512", "0.5696357", "0.5694953", "0.56925917", "0.5682251", "0.5669847", "0.5654908", "0.565222", "0.5651589", "0.5638716", "0.5626039", "0.5623645", "0.5615306", "0.5610807", "0.5607677", "0.55907094", "0.5577547", "0.5577547", "0.5577547", "0.5577547", "0.5577526", "0.55771536", "0.5565551", "0.5560693", "0.55520886", "0.55474454", "0.55470014", "0.5544631", "0.5539097", "0.5538887", "0.55386347", "0.5537901", "0.5535967", "0.55267435", "0.5517438", "0.5517438", "0.5515676", "0.551294", "0.55027735", "0.5494563", "0.548145", "0.5479539", "0.547454", "0.5457424", "0.5448292", "0.54427856", "0.544206", "0.5434665", "0.54302436", "0.542166", "0.5416855", "0.5412546", "0.54114753", "0.54102737", "0.540572", "0.53943384", "0.53902465", "0.5387722", "0.53838646", "0.5383076", "0.53771627", "0.53761935", "0.5374104", "0.5370028" ]
0.0
-1
Construct an empty ARFF structure.
def __init__(self):
    self.relation = ''
    self.attributes = []
    self.attribute_types = dict()
    self.attribute_data = dict()
    self.comment = []
    self.data = []
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_empty_trace(self):\n self.data = np.zeros(0, dtype=np.float32)\n self.header = SEGYTraceHeader(header=None, endian=self.endian)", "def __init__(self):\n self._header = self._Node(None, None, None)\n self._trailer = self._Node(None, None, None)\n self._header._next = self._trailer # trailer is after header\n self._trailer._previous = self._header # header is before trailer\n self._size = 0 # set size to 0", "def _create_empty(cls):\n self = object.__new__(cls)\n self.avatar_hash = 0\n self.avatar_type = IconType.none\n self.boosts_since = None\n self.flags = GuildProfileFlag()\n self.joined_at = None\n self.nick = None\n self.pending = False\n self.role_ids = None\n self.timed_out_until = None\n return self", "def __init__(self):\n self._header = self._Node(None, None, None)\n self._trailer = self._Node(None, None, None)\n self._header._next = self._trailer\n self._trailer._prev = self._header\n self._size = 0", "def _init_empty_polyhedron(self):\n self._ambient_dim = 0\n\n self._Vrepresentation = Sequence([])\n self._Vrepresentation.set_immutable()\n \n self._Hrepresentation = Sequence([])\n Equation(self, [-1]);\n self._Hrepresentation.set_immutable()\n\n self._V_adjacency_matrix = matrix(ZZ, 0, 0, 0)\n self._V_adjacency_matrix.set_immutable()\n\n self._H_adjacency_matrix = matrix(ZZ, 1, 1, 0)\n self._H_adjacency_matrix.set_immutable()", "def _create_empty_segy_file_object(self):\n self.textual_file_header = b''\n self.binary_file_header = None\n self.traces = []", "def __init__(self):\n\n self._header = self._Node(None, None, None)\n self._trailer = self._Node(None, None, None)\n self._header._next = self._trailer # trailer is the next node after header\n self._trailer._prev = self._header # header is the node before trailer\n self._size = 0 # keep track of the number of elements", "def empty(cls) -> BodyStructure:\n return _EmptyBodyStructure()", "def empty_copy(self) -> 'FqeData':\n new_data = FqeData(nalpha=self.nalpha(),\n nbeta=self.nbeta(),\n norb=self._core.norb(),\n fcigraph=self._core,\n dtype=self._dtype)\n new_data._low_thresh = self._low_thresh\n new_data.coeff = numpy.zeros_like(self.coeff)\n return new_data", "def __init__(self, full=False):\n self.full = full", "def test_init_empty(self):\n # NOTE: ModelSequences can't be initialized empty because it screws up\n # the dimensions of the array, and not worth special-casing.\n s = self.SEQ()\n self.assertEqual(s, \"\")\n assert s.moltype in (ASCII, BYTES)\n\n r = self.RNA()\n assert r.moltype is RNA", "def empty(cls) -> EnvelopeStructure:\n return _EmptyEnvelopeStructure()", "def build_empty(self):\n return self.art_type.empty()", "def _create_empty_binary_file_header(self):\n for _, name, _ in BINARY_FILE_HEADER_FORMAT:\n setattr(self, name, 0)", "def EMPTY(cls, tabledef, **kkw):\n rec = cls(**kkw)\n rec.setTable (tabledef)\n rec._isnew = True\n return rec", "def _init_empty(self):\n self._data = []", "def test_empty_structure():\n empty = SME_Struct()\n\n assert isinstance(empty.version, str)\n assert empty.teff is not None\n assert empty.logg is not None\n assert empty.vmic == 0\n assert empty.vmac == 0\n assert empty.vsini == 0\n\n assert empty.nseg == 0\n assert empty.wave is None\n assert empty.spec is None\n assert empty.uncs is None\n assert empty.synth is None\n assert empty.cont is None\n assert empty.mask is None\n assert empty.mask_good is None\n assert empty.mask_bad is None\n # assert empty.mask_line is None\n # assert empty.mask_continuum is None\n\n assert empty.cscale.shape == (0, 1)\n assert 
empty.vrad.shape == (0,)\n assert empty.cscale_flag == \"none\"\n assert empty.vrad_flag == \"none\"\n assert empty.cscale_degree == 0\n\n assert empty.mu is not None\n assert empty.nmu == 7\n\n # assert empty.md5 is not None\n\n assert empty.linelist is not None\n assert empty.species is not None\n assert len(empty.species) == 0\n assert empty.atomic is not None\n\n assert empty.monh == 0\n assert not np.isnan(empty[\"abund Fe\"])\n assert empty.abund[\"H\"] == 12\n assert not np.isnan(empty.abund()[\"Mg\"])\n\n assert empty.system_info is not None\n assert empty.system_info.arch == \"\"\n\n assert len(empty.fitparameters) == 0\n assert empty.fitresults is not None\n assert empty.fitresults.covariance is None\n\n assert empty.atmo is not None\n assert empty.atmo.depth is None\n\n assert empty.nlte is not None\n assert empty.nlte.elements == []", "def empty():\n return CAT([], 0, 0, active=False)", "def empty(cls):\n x = cls(base_types=set(), template_types={}, refined_types={}, humannames={},\n type_aliases={}, cpp_types={}, numpy_types={}, from_pytypes={},\n cython_ctypes={}, cython_cytypes={}, cython_pytypes={},\n cython_cimports={}, cython_cyimports={}, cython_pyimports={},\n cython_functionnames={}, cython_classnames={}, cython_c2py_conv={},\n cython_py2c_conv={})\n del x.extra_types\n del x.dtypes\n del x.stlcontainers\n return x", "def init_incomplete(cls, build=True) -> object:\n return cls(tool.matrix_to_array(tool.test_incomplete), build=build)", "def empty_room():\n\troom_material = pra.Material(energy_absorption=0.6, scattering=None)\n\troom_faces = make_polygon(\n\t\tcentre=[0,0,2.5],\n\t\tradius=10,\n\t\theight=5,\n\t\tN=4,\n\t\trpy=[0,0,np.pi/4]\n\t)\n\n\t# create room\n\twalls = []\n\twalls.extend(create_walls(room_faces, room_material))\n\n\troom = pra.Room(walls, fs=fs, max_order=3, ray_tracing=True, air_absorption=False)\n\n\troom.add_source([0, 0, 2.])\n\troom.add_microphone([0, 0.2, 2.1])\n\n\t# compute rir\n\troom.image_source_model()\n\troom.ray_tracing()\n\troom.compute_rir()\n\n\treturn room", "def empty_fuselage():\n fus = Fuselage(construct_geometry=False)\n return fus", "def zero(self):\n return self.create()", "def _empty_event(self):\n event = self.event_from_template(self._fields)\n event.protocol = self._protocol\n event.subject = self._subject\n event.montage = self._montage\n event.experiment = self._experiment\n event.session = self._session\n return event", "def empty_like(self):\n res = type(self)(\n self.shape.copy(),\n qhape=self.qhape.copy(),\n qodulus=self.qodulus,\n dtype=self.dtype,\n defval=self.defval,\n invar=self.invar,\n charge=self.charge,\n dirs=self.dirs.copy(),\n )\n return res", "def Empty():\n return Container(name='(empty)',\n metadata={},\n section_sizes={},\n metrics_by_file={})", "def f_empty(self):\n raise NotImplementedError(\"Should have implemented this.\")", "def _create_empty(cls, integration_application_id):\n self = object.__new__(cls)\n self.bot = ZEROUSER\n self.description = None\n self.icon_type = ICON_TYPE_NONE\n self.icon_hash = 0\n self.id = integration_application_id\n self.name = ''\n return self", "def make_empty(cls):\n args = inspect.getargspec(cls.__init__).args\n # remove self; always first arg of __init__\n args = args[1:]\n return cls(**dict.fromkeys(args))", "def __init__(self, filename=None,\n astrotarget=None,data_index=0,\n dataslice0=None,dataslice1=None,\n empty=False, **kwargs):\n self.__build__(data_index=data_index)\n\n if empty:\n return\n \n if filename is not None:\n force_it = 
kwargs.pop(\"force_it\",True)\n self.load(filename,force_it=force_it,\n dataslice0=dataslice0,\n dataslice1=dataslice1,\n **kwargs)\n # - Set the target if any\n if astrotarget is not None:\n self.set_target(astrotarget)", "def __init__(self):\n # Create an empty list of features\n self.features = []\n # Create an empty list of edges\n self.constraints = []", "def empty_board(self) -> Board:\r\n\r\n board = Board()\r\n board.birth_rule = self.rule.birth_rule\r\n board.remain_rule = self.rule.remain_rule\r\n board.empty_board(self.BOARD_HEIGHT, self.BOARD_WIDTH)\r\n\r\n return board", "def test_empty_init(self):\n sch = scheme.Scheme()\n\n assert len(sch.args) == 0\n assert sch._flat is None", "def rpConstruct(cls):\r\n return cls(None, None, None)", "def empty(cls):\n pass", "def __init__(self):\n self.ram = [0]*256\n self.reg = [0] * 8\n self.fl = 0b00000000\n self.pc = 0\n self.sp = 0xF3\n self.is_run = False\n self.call_flag = False\n pass", "def empty_struct_data(self):\n return self.empty_header_data() + bytes(self.pad_string(''))", "def _make_blank(cls) -> pd.DataFrame:\n spec = list(zip(cls._required_columns, cls._required_dtypes))\n try:\n arr = np.zeros(0, dtype=spec)\n return pd.DataFrame(arr)\n except TypeError as exc:\n raise TypeError(r\"{exc}: {spec}\") from exc", "def __init__(self, mode, all_fields, ignore_fields):\n self._keep_original = True\n if mode == ModeKeys.INFER \\\n and ignore_fields is not None \\\n and len(ignore_fields) > 0:\n self._keep_original = False\n self._reserved_fields = [x for x in all_fields\n if x not in ignore_fields]\n self._new_type = namedtuple(\"new_type\",\n self._reserved_fields)", "def zero(self) -> 'PFElement':\n return self(0)", "def __init__(self):\n # The bpq_kind and matching_rule are deliberately set illegal\n self.bpq_kind = -1\n self.matching_rule = -1\n self.creation_ts = 0\n self.creation_seq = 0\n self.src_eid_len = 0\n self.src_eid = None\n self.bpq_id_len = 0\n self.bpq_id = None\n self.bpq_val_len = 0\n self.bpq_val = None\n self.frag_cnt = 0\n self.frag_desc = []\n return", "def __init__(self: object) -> None:\n self.empty: bool = True\n self.episode_broadcast: str = \"\"\n self.episode_id: int = 0\n self.episode_inspectors: str = \"\"\n self.episode_name: str = \"\"\n self.episode_sequence: str = \"\"\n self.episode_url: str = \"\"\n self.episode_year: int = 0", "def clear(self):\n\n self.index = 1\n self.degen = 1.\n self.nnnn_out = False\n self.json_out = False\n self.verbose = False\n self.ipol = 0\n self.ellip = 0.\n self.nepts = 0\n self.genfmt_order = 2\n self.genfmt_vers = \"\"\n self.exch_label = \"\"\n self.rs = 0.\n self.vint = 0.\n self.xmu = 0.\n self.edge = 0.\n self.kf = 0.\n self.rnorman = 0.\n self.gamach = 0.\n self.nepts = FEFF_maxpts\n\n dargs = dict(dtype=np.float64, order='F')\n largs = dict(dtype=np.int32, order='F')\n\n self.evec = np.zeros(3, **dargs)\n self.xivec = np.zeros(3, **dargs)\n self.ipot = np.zeros(1+FEFF_maxleg, **largs)\n self.beta = np.zeros(1+FEFF_maxleg, **dargs)\n self.eta = np.zeros(2+FEFF_maxleg, **dargs)\n self.ri = np.zeros(FEFF_maxleg, **dargs)\n self.rat = np.zeros((3, 2+FEFF_maxleg), **dargs)\n self.iz = np.zeros(1+FEFF_maxpot, **largs)\n self.kfeff = np.zeros(FEFF_maxpts, **dargs)\n self.real_phc = np.zeros(FEFF_maxpts, **dargs)\n self.mag_feff = np.zeros(FEFF_maxpts, **dargs)\n self.pha_feff = np.zeros(FEFF_maxpts, **dargs)\n self.red_fact = np.zeros(FEFF_maxpts, **dargs)\n self.lam = np.zeros(FEFF_maxpts, **dargs)\n self.rep = np.zeros(FEFF_maxpts, **dargs)\n self.nleg = 1", 
"def make_no_init(self, mode=None):\r\n if mode is None:\r\n mode = theano.compile.mode.get_default_mode()\r\n memo = {}\r\n self.allocate(memo)\r\n rval = self.build(mode, memo)\r\n return rval", "def __init__(self):\n self.Name = None\n self.Type = None\n self.Signature = None\n self.Contract = None", "def _create_empty_trace_header(self):\n # First set all fields to zero.\n for field in TRACE_HEADER_FORMAT:\n setattr(self, field[1], 0)", "def __init__(self, filename, default_seq=None, key_function=None, as_raw=False, strict_bounds=False):\n self.filename = filename\n self.faidx = Faidx(filename, key_function=key_function, as_raw=as_raw,\n default_seq=default_seq, strict_bounds=strict_bounds)", "def create_empty_image(width=512, height=512):\n blank_img = np.zeros((width, height, 3), np.uint8)\n # Return instance of the class\n return ExtendedImage(blank_img)", "def _create_empty_su_file_object(self):\n self.traces = []", "def new(self):\n\n self.obj = self.factory()\n\n if self.textproperty is None:\n self.attributes = ElementHandler.load_definitions(self, self.obj)", "def test_deserialize_empty(self):\n # Completely empty\n out = self.instance.deserialize(self._empty_msg)\n self.assert_result_equal(out[0], self.instance._empty_msg)\n self.assert_equal(out[1], dict(size=0, incomplete=False))\n # Empty metadata and message\n out = self.instance.deserialize((2 * YGG_MSG_HEAD) + self._empty_msg)\n self.assert_result_equal(out[0], self.instance._empty_msg)\n self.assert_equal(out[1], dict(size=0, incomplete=False))", "def zeros(shape: any,\n dtype: any = float,\n order: {'C', 'F'} = 'C',\n *,\n alignment: int = 16,\n **kwargs):\n return empty(shape=shape,\n dtype=dtype,\n order=order,\n alignment=alignment,\n __gen__=np.zeros)", "def set_empty(self):\n pattern = [[0,0,0,0],\n [0,0,0,0],\n [0,0,0,0],\n [0,0,0,0]]\n self.set_pattern(pattern)", "def __init__(\n self,\n avr: AVR,\n name: str,\n mac_address: str,\n model: str,\n zone_number: int,\n entry_id: str,\n ) -> None:\n super().__init__()\n self.avr = avr\n self._entry_id = entry_id\n self._zone_number = zone_number\n self._zone = avr.zones[zone_number]\n if zone_number > 1:\n self._attr_name = f\"zone {zone_number}\"\n self._attr_unique_id = f\"{mac_address}_{zone_number}\"\n else:\n self._attr_name = None\n self._attr_unique_id = mac_address\n\n self._attr_device_info = DeviceInfo(\n identifiers={(DOMAIN, mac_address)},\n name=name,\n manufacturer=MANUFACTURER,\n model=model,\n )\n self.set_states()", "def from_empty(cls, mem):\n return cls(mem, size=0, capacity=mem.size)", "def _init_data(self) -> None:\n self.dtype = dict()\n self.shape = dict()\n self.size = dict()\n self.attrs = dict()\n self.data_ptr = dict()\n\n if self.mode == 'r':\n for k in self.fp.keys():\n self.dtype[k] = self.fp[k].dtype\n self.shape[k] = self.fp[k].shape\n self.size[k] = self.fp[k].shape[0]\n self.data_ptr[k] = 0", "def __init__(self, *args):\n this = _libsbml.new_UnitDefinition(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\r\n self.id = random.randint(0, 1 * (10 ** 9))\r\n self.title = None\r\n self.date = None\r\n self.time = None\r\n self.datetime = None\r\n self.duration = None\r\n self.notes = None\r\n self.recurring = None\r\n self.rec_interval = {\r\n \"unit\": None, \"skip\": None, \"days\": None, \"ordinal\": None,\r\n \"dates\": None, \"end\": None}\r\n self.rec_total = None\r\n self.rec_child_seq = None\r\n self.rec_parent = None\r\n self.info = {}", "def __init__(self, biogrf='332', descrp='', 
ff='DREIDING', atoms=None):\n self.biogrf = biogrf\n self.descrp = ''\n self.ff = ff\n super(self.__class__, self).__init__(atoms)", "def __init__(self, attributes=None):\n super().__init__(attributes)\n \n # processing parameters\n self.set = _Settings()\n\n # results storage\n self.measure_time = None # store here in case we average FIDs, filled by chain!\n self.frequency_shift = None\n self.phase_0 = None\n self.data = None\n \n if attributes is not None:\n self.inflate(attributes)\n\n self.chain = None", "def empty_copy(self):\n new_parser = VectorParser()\n new_parser.idxs_and_shapes = self.idxs_and_shapes.copy()\n new_parser.vect = None\n return new_parser", "def test_init_with_none(self):\n Digest()", "def test_empty_struct(self):\n empty = struct_pb2.Struct()\n deserialized = rpc.deserialize_resource_props(empty)\n self.assertDictEqual({}, deserialized)", "def __init__(self, xr, endElement=False):\n\n self.name = xr.LocalName\n self.namespace = xr.NamespaceURI\n self.prefix = xr.Prefix\n self.value = xr.Value\n if xr.IsEmptyElement and endElement:\n self.nodeType = XmlNodeType.EndElement\n else:\n self.nodeType = xr.NodeType\n\n if xr.NodeType == XmlNodeType.Element:\n self.attributes = []\n while xr.MoveToNextAttribute():\n if xr.NamespaceURI == 'http://www.w3.org/2000/xmlns/':\n continue\n self.attributes.append(XmlNode(xr))\n xr.MoveToElement()", "def make_blank_request(self, *args, **kwargs):\n factory = self.get(abcs.ARequest)\n request = factory.blank(*args, app=self, **kwargs)\n self._set_request_attributes(request)\n return request", "def __init__(self, lbda=None, bandname=None, zp=None, \n mjd=None, empty=False,**kwargs):\n self.__build__()\n if empty:\n return\n prop = kwargs_update(dict(lbda=lbda, bandname=bandname,mjd=mjd, zp=zp),\n **kwargs)\n self.create(**prop)", "def __init__(self):\n this = _libsbml.new_RDFAnnotationParser()\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, extra_fields=None):\n if extra_fields:\n self.fields.extend(extra_fields)\n self.data = {k: [] for k in self.fields}\n self.last_r = 0.0", "def _empty(self, *dims, **kwargs):\n size = []\n dtypes = []\n for d in dims:\n size.append(len(self[d]))\n dtypes.append(self[d].dtype)\n dtype = kwargs.pop('dtype', numpy.result_type(*dtypes))\n fv = kwargs.pop('fill_value')\n return numpy.full(size, fill_value=fv, dtype=dtype)", "def __init__(self, values=None, empty_loc=None):\n self.shape = len(values), len(values[0])\n self.h, self.w = self.shape\n # self._validate_values(values)\n self.values = values\n self.pad = 1\n if self.shape[0] > 3:\n self.pad = 2\n if self.shape[0] > 10:\n self.pad = 3\n self.came_from = None\n self.dir_from = None\n if empty_loc is None:\n self._find_empty()\n else:\n self.empty_loc = empty_loc\n self.g = 0\n self.heur = 0\n self.hash = hash(str(self))", "def empty(self):\n self.drop()\n self.create()", "def empty(cls):\n return Marker()", "def empty(self):", "def __init__(self, uid, arbor=None, root=False):\n self.uid = uid\n self.arbor = weakref.proxy(arbor)\n if root:\n self.root = -1\n self.field_data = FieldContainer(arbor)\n else:\n self.root = None", "def __init__(self) -> None:\n self.length = 0", "def create_training_file(D_RAT):\r\n return create_arff_file(D_RAT, 0)", "def __init__(self):\n self.ram = [0] * 256\n self.reg = [0] * 8\n self.pc = 0\n self.running = True\n self.flags = 0", "def empty(cls, n):\n l1 = [None] * n\n return cls(l1)", "def test_charge_increment_model_initialize_with_no_elements(self):\n 
ForceField(xml_charge_increment_model_formal_charges)", "def __init__(self,\n atoms: Union[List[Atom], Atoms, None] = None):\n self._atoms = Atoms(atoms) if atoms is not None else None", "def create_empty_node():\n from linked_list import Node\n return Node()", "def __init__(self, geo_model=None):\n self.rex_bytes = bytearray()\n self.n_bytes = 0\n\n self.data_id = 0\n self.geo_model = geo_model", "def __init__(self, model=None, azimuth=None, attenuation=None, attenuation_direct=None, attenuation_indirect=None, tilt=None): # noqa: E501 # noqa: E501\n self._model = None\n self._azimuth = None\n self._attenuation = None\n self._attenuation_direct = None\n self._attenuation_indirect = None\n self._tilt = None\n self.discriminator = None\n if model is not None:\n self.model = model\n if azimuth is not None:\n self.azimuth = azimuth\n if attenuation is not None:\n self.attenuation = attenuation\n if attenuation_direct is not None:\n self.attenuation_direct = attenuation_direct\n if attenuation_indirect is not None:\n self.attenuation_indirect = attenuation_indirect\n if tilt is not None:\n self.tilt = tilt", "def __init__(self, *args):\n this = _ida_hexrays.new_fnum_array(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, shape):\n self.eyes = [(), ()]\n self.shape = shape\n self.state = 0\n self.new_frame()", "def fromfileobj(cls, fileobj, fullparse=True):\n buf = fileobj.read(_ArInfoStruct.size)\n if not buf:\n return None\n\n if len(buf) < _ArInfoStruct.size:\n raise IOError(\n 'not enough data for header, got %r, needed %r' % (\n len(buf), _ArInfoStruct.size))\n\n name, mtime, uid, gid, mode, datasize, magic = _ArInfoStruct.unpack(buf)\n\n datasize = int(datasize)\n if fullparse:\n mtime = int(mtime)\n uid = int(uid)\n gid = int(gid)\n mode = int(mode, 8)\n\n if name.startswith('#1/'):\n arformat = AR_FORMAT_BSD\n\n try:\n filenamesize = int(name[3:])\n except ValueError:\n raise IOError('invalid file name length: %r' % name[3:])\n\n filename = fileobj.read(filenamesize)\n if len(filename) != filenamesize:\n raise IOError(\n 'not enough data for filename, got %r, needed %r' % (\n len(name), filenamesize))\n\n filesize = datasize - filenamesize\n\n elif name.startswith('/'):\n arformat = AR_FORMAT_SYSV\n raise SystemError('%s format is not supported.' 
% arformat)\n\n else:\n arformat = AR_FORMAT_SIMPLE\n filename = name.strip()\n filesize = datasize\n\n if magic != AR_MAGIC_BIT:\n raise IOError('file magic invalid, got %r, needed %r' % (\n magic, AR_MAGIC_BIT))\n\n return cls(\n arformat, filename.decode('utf-8'), filesize, mtime, uid, gid, mode)", "def __init__(self):\n self.x = {}\n self.len = 0\n self.annotations = {}", "def __init__(self):\n self.metadata = {}\n self.geometry = {'array': None, \n 'geom': None, \n 'wkt': None}", "def __init__(self, random_pad, checksum, seq_num):\n self.version = b\"\\x01\\x00\\x00\\x00\"\n self.random_pad = random_pad\n self.checksum = checksum\n self.seq_num = seq_num", "def __init__(self, osi, fy, e0, a, n):\n self.osi = osi\n self.fy = float(fy)\n self.e0 = float(e0)\n self.a = float(a)\n self.n = float(n)\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.e0, self.a, self.n]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def test_empty_value(self):\n avp_val = avp.AVP(0)\n self.assertEqual(avp_val.value, None)\n self.assertEqual(avp_val.payload, None)\n\n # We can then set its value\n avp_val.value = b''\n self.assertEqual(avp_val.value, b'')\n self.assertEqual(avp_val.payload, b'')\n\n # And unset it again\n avp_val.value = None\n self.assertEqual(avp_val.value, None)\n self.assertEqual(avp_val.payload, None)", "def test_constructor_None(self):\n structure = MultiLingualTextStructure(None, use_default_for_empty=True)\n self.assertEqual(structure[\"nb\"], \"\")\n self.assertEqual(structure[\"en\"], \"\")", "def __init__(self):\n\n self.check_nans = False\n self.debug_force_memmap = False\n\n # Implementations must initialise the dtype so that feature arrays can be created with correct type:\n self.dtype = None", "def __init__(self, obj=None):\n if obj:\n self.rborder = obj.rborder\n self.lborder = obj.lborder\n self.tborder = obj.tborder\n self.bborder = obj.bborder\n self.padding = obj.padding\n self.longlist = obj.longlist\n else:\n self.rborder = 0\n self.lborder = 0\n self.tborder = 0\n self.bborder = 0\n self.padding = 0\n self.longlist = 0", "def writeArff(file_name, relation, classes, attrs, data):\n\tprint 'writeArff:', file_name, len(data), len(data[0])\n\tf = file(file_name, 'w')\n\tf.write('%\\n')\n\tf.write('%% %s \\n' % os.path.basename(file_name))\n\tf.write('%\\n')\n\tf.write('% Created by ' + os.path.basename(sys.argv[0]) + ' on ' + datetime.date.today().strftime(\"%A, %d %B %Y\") + '\\n')\n\tf.write('% Code at http://bit.ly/b7Kkqt\\n')\n\tf.write('%\\n')\n\tf.write('% Constructed from raw data in http://archive.ics.uci.edu/ml/machine-learning-databases/soybean/\\n')\n\tf.write('%% %d instances\\n' % len(data))\n\tf.write('%% %d attributes + 1 class = %d columns\\n' % (len(data[0]) - 1, len(data[0])))\n\tf.write('\\n')\n\tf.write('@RELATION ' + relation + '\\n\\n')\n\tf.write('@ATTRIBUTE %-15s {%s}\\n' % ('class', ','.join([x for x in classes if not x == '?'])))\n\tfor a in attrs:\n\t\tf.write('@ATTRIBUTE %-15s {%s}\\n' % (a['attr'], ','.join([x for x in a['vals'] if not x == '?'])))\n\tf.write('\\n@DATA\\n\\n')\n\tfor instance in data:\n\t\tf.write(', '.join(instance) + '\\n')\n\tf.close()\n\n\t\"\"\" Copy .arff files to .arff.txt so they can be viewed from Google docs \"\"\"\n\tprint 'writeArff:', file_name + '.txt', '-- duplicate'\n\tshutil.copyfile(file_name, file_name + '.txt')", "def __init__(self, initializer='None', struct=None, leaves=None):\n if (struct is not 
None and leaves is None) or (struct is None and leaves is not None):\n raise RuntimeError(\"if initializing with struct, also leaves have to be given (and reversely)\")\n\n if struct is not None: # then also leaves is not None\n self.struct = struct\n self.leaves = leaves\n elif initializer != 'None':\n self.struct = dict(list=[None], dict={}, pseudo=False, liftedkeys=False, n=1)\n self.leaves = [initializer]\n else:\n self.struct = dict(list=[], dict={}, pseudo=False, liftedkeys=False, n=0)\n self.leaves = []", "def test_parse_empty_file(self):\n bin.parser.parse_file(None, self.mock_db, self.tf, False)", "def init_zero(cls, h):\n shapes = Checkpoint.make_shaped_arrays(h)\n return jax.tree_util.tree_map(lambda s: np.zeros(s.shape, s.dtype), shapes)", "def __init__(self, preamble, label):\n\n self.fvals = preamble[:]\n self.label = label\n\n # for accessing rule literals by feature names\n self.by_name = collections.defaultdict(lambda: [])\n for fv in self.fvals:\n self.by_name[fv.feat].append(tuple([fv.val, fv.pos]))", "def test_zero_size_array_constructor():\n fcode = \"integer ::\"\n ast = Fortran2003.Ac_Spec(fcode)\n assert isinstance(ast, Fortran2003.Ac_Spec)\n assert isinstance(ast.children[0], Fortran2003.Intrinsic_Type_Spec)", "def CreateEmpty(cls, ir_id: int) -> \"GraphTuple\":\n return GraphTuple(\n ir_id=ir_id,\n node_count=0,\n control_edge_count=0,\n data_edge_count=0,\n call_edge_count=0,\n edge_position_max=0,\n pickled_graph_tuple_size=0,\n )" ]
[ "0.5822378", "0.5721686", "0.56984425", "0.56752235", "0.5657628", "0.5654186", "0.5644643", "0.5617209", "0.5603904", "0.55908096", "0.557001", "0.55659646", "0.5524445", "0.5469858", "0.54600406", "0.5455272", "0.54444385", "0.5356642", "0.5347853", "0.53235525", "0.5297517", "0.5286482", "0.5281225", "0.52621615", "0.52518743", "0.5212746", "0.51951885", "0.51726305", "0.51724577", "0.514426", "0.51254207", "0.51155436", "0.5112439", "0.5105025", "0.50947785", "0.5070256", "0.50569564", "0.5040763", "0.5030349", "0.5017299", "0.5014987", "0.501482", "0.5013818", "0.50126725", "0.5004132", "0.49995992", "0.49854165", "0.4969575", "0.4967047", "0.49640378", "0.49634504", "0.49593133", "0.49478996", "0.4942737", "0.49423552", "0.49189743", "0.49168283", "0.49126345", "0.48920545", "0.48778835", "0.48726654", "0.486619", "0.4865865", "0.48577708", "0.48540986", "0.48537245", "0.48431858", "0.48402706", "0.48375398", "0.4836735", "0.48289862", "0.4825945", "0.4823136", "0.48211262", "0.482087", "0.48119244", "0.4807838", "0.48039678", "0.4803083", "0.47984436", "0.47976238", "0.47971192", "0.47958493", "0.47939584", "0.47929844", "0.4792469", "0.4787581", "0.47853228", "0.47793126", "0.47671464", "0.47666967", "0.4766147", "0.47641563", "0.47636056", "0.47624204", "0.4754446", "0.47487527", "0.47486314", "0.4748137", "0.47455814", "0.47452444" ]
0.0
-1